1/*
2 Copyright (c) 2005-2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#if _MSC_VER
18#define _SCL_SECURE_NO_WARNINGS
19#endif
20
21#include "tbb/concurrent_vector.h"
22#include "tbb/tbb_allocator.h"
23#include "tbb/cache_aligned_allocator.h"
24#include "tbb/tbb_exception.h"
25#include <cstdio>
26#include <cstdlib>
27#include <functional>
28#include <vector>
29#include <numeric>
30#include "harness_report.h"
31#include "harness_assert.h"
32#include "harness_allocator.h"
33#include "harness_defs.h"
34#include "test_container_move_support.h"
35
36#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
37 // Workaround for overzealous compiler warnings in /Wp64 mode
38 #pragma warning (push)
39 #pragma warning (disable: 4800)
40#endif
41
42#if TBB_USE_EXCEPTIONS
43static bool known_issue_verbose = false;
44#define KNOWN_ISSUE(msg) if(!known_issue_verbose) known_issue_verbose = true, REPORT(msg)
45#endif /* TBB_USE_EXCEPTIONS */
46
47inline void NextSize( int& s ) {
48 if( s<=32 ) ++s;
49 else s += s/10;
50}
51
52//! Check vector have expected size and filling
53template<typename vector_t>
54static void CheckVector( const vector_t& cv, size_t expected_size, size_t old_size ) {
55 ASSERT( cv.capacity()>=expected_size, NULL );
56 ASSERT( cv.size()==expected_size, NULL );
57 ASSERT( cv.empty()==(expected_size==0), NULL );
58 for( int j=0; j<int(expected_size); ++j ) {
59 if( cv[j].bar()!=~j )
60 REPORT("ERROR on line %d for old_size=%ld expected_size=%ld j=%d\n",__LINE__,long(old_size),long(expected_size),j);
61 }
62}
63
64//! Test of assign, grow, copying with various sizes
65void TestResizeAndCopy() {
66 typedef static_counting_allocator<debug_allocator<Foo,std::allocator>, std::size_t> allocator_t;
67 typedef tbb::concurrent_vector<Foo, allocator_t> vector_t;
68 allocator_t::init_counters();
69 for( int old_size=0; old_size<=128; NextSize( old_size ) ) {
70 for( int new_size=0; new_size<=1280; NextSize( new_size ) ) {
71 size_t count = FooCount;
72 vector_t v;
73 ASSERT( count==FooCount, NULL );
74 v.assign(old_size/2, Foo() );
75 ASSERT( count+old_size/2==FooCount, NULL );
76 for( int j=0; j<old_size/2; ++j )
77 ASSERT( v[j].state == Foo::CopyInitialized, NULL);
78 v.assign(FooIterator(0), FooIterator(old_size));
79 v.resize(new_size, Foo(33) );
80 ASSERT( count+new_size==FooCount, NULL );
81 for( int j=0; j<new_size; ++j ) {
82 int expected = j<old_size ? j : 33;
83 if( v[j].bar()!=expected )
84 REPORT("ERROR on line %d for old_size=%ld new_size=%ld v[%ld].bar()=%d != %d\n",__LINE__,long(old_size),long(new_size),long(j),v[j].bar(), expected);
85 }
86 ASSERT( v.size()==size_t(new_size), NULL );
87 for( int j=0; j<new_size; ++j ) {
88 v[j].bar() = ~j;
89 }
90 const vector_t& cv = v;
91 // Try copy constructor
92 vector_t copy_of_v(cv);
93 CheckVector(cv,new_size,old_size);
94 ASSERT( !(v != copy_of_v), NULL );
95 v.clear();
96 ASSERT( v.empty(), NULL );
97 swap(v, copy_of_v);
98 ASSERT( copy_of_v.empty(), NULL );
99 CheckVector(v,new_size,old_size);
100 }
101 }
102 ASSERT( allocator_t::items_allocated == allocator_t::items_freed, NULL);
103 ASSERT( allocator_t::allocations == allocator_t::frees, NULL);
104}
105
106//! Test reserve, compact, capacity
107void TestCapacity() {
108 typedef static_counting_allocator<debug_allocator<Foo,tbb::cache_aligned_allocator>, std::size_t> allocator_t;
109 typedef tbb::concurrent_vector<Foo, allocator_t> vector_t;
110 allocator_t::init_counters();
111 for( size_t old_size=0; old_size<=11000; old_size=(old_size<5 ? old_size+1 : 3*old_size) ) {
112 for( size_t new_size=0; new_size<=11000; new_size=(new_size<5 ? new_size+1 : 3*new_size) ) {
113 size_t count = FooCount;
114 {
115 vector_t v; v.reserve(old_size);
116 ASSERT( v.capacity()>=old_size, NULL );
117 v.reserve( new_size );
118 ASSERT( v.capacity()>=old_size, NULL );
119 ASSERT( v.capacity()>=new_size, NULL );
120 ASSERT( v.empty(), NULL );
121 size_t fill_size = 2*new_size;
122 for( size_t i=0; i<fill_size; ++i ) {
123 ASSERT( size_t(FooCount)==count+i, NULL );
124 size_t j = v.grow_by(1) - v.begin();
125 ASSERT( j==i, NULL );
126 v[j].bar() = int(~j);
127 }
128 vector_t copy_of_v(v); // should allocate first segment with same size as for shrink_to_fit()
129 if(__TBB_Log2(/*reserved size*/old_size|1) > __TBB_Log2(fill_size|1) )
130 ASSERT( v.capacity() != copy_of_v.capacity(), NULL );
131 v.shrink_to_fit();
132 ASSERT( v.capacity() == copy_of_v.capacity(), NULL );
133 CheckVector(v, new_size*2, old_size); // check vector correctness
134 ASSERT( v==copy_of_v, NULL ); // TODO: check also segments layout equality
135 }
136 ASSERT( FooCount==count, NULL );
137 }
138 }
139 ASSERT( allocator_t::items_allocated == allocator_t::items_freed, NULL);
140 ASSERT( allocator_t::allocations == allocator_t::frees, NULL);
141}
142
143struct AssignElement {
144 typedef tbb::concurrent_vector<int>::range_type::iterator iterator;
145 iterator base;
146 void operator()( const tbb::concurrent_vector<int>::range_type& range ) const {
147 for( iterator i=range.begin(); i!=range.end(); ++i ) {
148 if( *i!=0 )
149 REPORT("ERROR for v[%ld]\n", long(i-base));
150 *i = int(i-base);
151 }
152 }
153 AssignElement( iterator base_ ) : base(base_) {}
154};
155
156struct CheckElement {
157 typedef tbb::concurrent_vector<int>::const_range_type::iterator iterator;
158 iterator base;
159 void operator()( const tbb::concurrent_vector<int>::const_range_type& range ) const {
160 for( iterator i=range.begin(); i!=range.end(); ++i )
161 if( *i != int(i-base) )
162 REPORT("ERROR for v[%ld]\n", long(i-base));
163 }
164 CheckElement( iterator base_ ) : base(base_) {}
165};
166
167#include "tbb/tick_count.h"
168#include "tbb/parallel_for.h"
169#include "harness.h"
170
171//! Problem size
172const size_t N = 500000;
173
174//! Test parallel access by iterators
175void TestParallelFor( int nthread ) {
176 typedef tbb::concurrent_vector<int> vector_t;
177 vector_t v;
178 v.resize(N);
179 tbb::tick_count t0 = tbb::tick_count::now();
180 REMARK("Calling parallel_for with %ld threads\n",long(nthread));
181 tbb::parallel_for( v.range(10000), AssignElement(v.begin()) );
182 tbb::tick_count t1 = tbb::tick_count::now();
183 const vector_t& u = v;
184 tbb::parallel_for( u.range(10000), CheckElement(u.begin()) );
185 tbb::tick_count t2 = tbb::tick_count::now();
186 REMARK("Time for parallel_for: assign time = %8.5f, check time = %8.5f\n",
187 (t1-t0).seconds(),(t2-t1).seconds());
188 for( long i=0; size_t(i)<v.size(); ++i )
189 if( v[i]!=i )
190 REPORT("ERROR for v[%ld]\n", i);
191}
192
193template<typename Iterator1, typename Iterator2>
194void TestIteratorAssignment( Iterator2 j ) {
195 Iterator1 i(j);
196 ASSERT( i==j, NULL );
197 ASSERT( !(i!=j), NULL );
198 Iterator1 k;
199 k = j;
200 ASSERT( k==j, NULL );
201 ASSERT( !(k!=j), NULL );
202}
203
204template<typename Range1, typename Range2>
205void TestRangeAssignment( Range2 r2 ) {
206 Range1 r1(r2); r1 = r2;
207}
208
209template<typename Iterator, typename T>
210void TestIteratorTraits() {
211 AssertSameType( static_cast<typename Iterator::difference_type*>(0), static_cast<ptrdiff_t*>(0) );
212 AssertSameType( static_cast<typename Iterator::value_type*>(0), static_cast<T*>(0) );
213 AssertSameType( static_cast<typename Iterator::pointer*>(0), static_cast<T**>(0) );
214 AssertSameType( static_cast<typename Iterator::iterator_category*>(0), static_cast<std::random_access_iterator_tag*>(0) );
215 T x;
216 typename Iterator::reference xr = x;
217 typename Iterator::pointer xp = &x;
218 ASSERT( &xr==xp, NULL );
219}
220
221template<typename Vector, typename Iterator>
222void CheckConstIterator( const Vector& u, int i, const Iterator& cp ) {
223 typename Vector::const_reference pref = *cp;
224 if( pref.bar()!=i )
225 REPORT("ERROR for u[%ld] using const_iterator\n", long(i));
226 typename Vector::difference_type delta = cp-u.begin();
227 ASSERT( delta==i, NULL );
228 if( u[i].bar()!=i )
229 REPORT("ERROR for u[%ld] using subscripting\n", long(i));
230 ASSERT( u.begin()[i].bar()==i, NULL );
231}
232
233template<typename Iterator1, typename Iterator2, typename V>
234void CheckIteratorComparison( V& u ) {
235 V u2 = u;
236 Iterator1 i = u.begin();
237
238 for( int i_count=0; i_count<100; ++i_count ) {
239 Iterator2 j = u.begin();
240 Iterator2 i2 = u2.begin();
241 for( int j_count=0; j_count<100; ++j_count ) {
242 ASSERT( (i==j)==(i_count==j_count), NULL );
243 ASSERT( (i!=j)==(i_count!=j_count), NULL );
244 ASSERT( (i-j)==(i_count-j_count), NULL );
245 ASSERT( (i<j)==(i_count<j_count), NULL );
246 ASSERT( (i>j)==(i_count>j_count), NULL );
247 ASSERT( (i<=j)==(i_count<=j_count), NULL );
248 ASSERT( (i>=j)==(i_count>=j_count), NULL );
249 ASSERT( !(i==i2), NULL );
250 ASSERT( i!=i2, NULL );
251 ++j;
252 ++i2;
253 }
254 ++i;
255 }
256}
257
258template<typename Vector, typename T>
259void TestGrowToAtLeastWithSourceParameter(T const& src){
260 static const size_t vector_size = 10;
261 Vector v1(vector_size,src);
262 Vector v2;
263 v2.grow_to_at_least(vector_size,src);
264 ASSERT(v1==v2,"grow_to_at_least(vector_size,src) did not properly initialize new elements ?");
265}
266//! Test sequential iterators for vector type V.
267/** Also does timing. */
268template<typename T>
269void TestSequentialFor() {
270 typedef tbb::concurrent_vector<FooWithAssign> V;
271 V v(N);
272 ASSERT(v.grow_by(0) == v.grow_by(0, FooWithAssign()), NULL);
273
274 // Check iterator
275 tbb::tick_count t0 = tbb::tick_count::now();
276 typename V::iterator p = v.begin();
277 ASSERT( !(*p).is_const(), NULL );
278 ASSERT( !p->is_const(), NULL );
279 for( int i=0; size_t(i)<v.size(); ++i, ++p ) {
280 if( (*p).state!=Foo::DefaultInitialized )
281 REPORT("ERROR for v[%ld]\n", long(i));
282 typename V::reference pref = *p;
283 pref.bar() = i;
284 typename V::difference_type delta = p-v.begin();
285 ASSERT( delta==i, NULL );
286 ASSERT( -delta<=0, "difference type not signed?" );
287 }
288 tbb::tick_count t1 = tbb::tick_count::now();
289
290 // Check const_iterator going forwards
291 const V& u = v;
292 typename V::const_iterator cp = u.begin();
293 ASSERT( cp == v.cbegin(), NULL );
294 ASSERT( (*cp).is_const(), NULL );
295 ASSERT( cp->is_const(), NULL );
296 ASSERT( *cp == v.front(), NULL);
297 for( int i=0; size_t(i)<u.size(); ++i ) {
298 CheckConstIterator(u,i,cp);
299 V::const_iterator &cpr = ++cp;
300 ASSERT( &cpr == &cp, "pre-increment not returning a reference?");
301 }
302 tbb::tick_count t2 = tbb::tick_count::now();
303 REMARK("Time for serial for: assign time = %8.5f, check time = %8.5f\n",
304 (t1-t0).seconds(),(t2-t1).seconds());
305
306 // Now go backwards
307 cp = u.end();
308 ASSERT( cp == v.cend(), NULL );
309 for( int i=int(u.size()); i>0; ) {
310 --i;
311 V::const_iterator &cpr = --cp;
312 ASSERT( &cpr == &cp, "pre-decrement not returning a reference?");
313 if( i>0 ) {
314 typename V::const_iterator cp_old = cp--;
315 intptr_t here = (*cp_old).bar();
316 ASSERT( here==u[i].bar(), NULL );
317 typename V::const_iterator cp_new = cp++;
318 intptr_t prev = (*cp_new).bar();
319 ASSERT( prev==u[i-1].bar(), NULL );
320 }
321 CheckConstIterator(u,i,cp);
322 }
323
324 // Now go forwards and backwards
325 ptrdiff_t k = 0;
326 cp = u.begin();
327 for( size_t i=0; i<u.size(); ++i ) {
328 CheckConstIterator(u,int(k),cp);
329 typename V::difference_type delta = i*3 % u.size();
330 if( 0<=k+delta && size_t(k+delta)<u.size() ) {
331 V::const_iterator &cpr = (cp += delta);
332 ASSERT( &cpr == &cp, "+= not returning a reference?");
333 k += delta;
334 }
335 delta = i*7 % u.size();
336 if( 0<=k-delta && size_t(k-delta)<u.size() ) {
337 if( i&1 ) {
338 V::const_iterator &cpr = (cp -= delta);
339 ASSERT( &cpr == &cp, "-= not returning a reference?");
340 } else
341 cp = cp - delta; // Test operator-
342 k -= delta;
343 }
344 }
345
346 for( int i=0; size_t(i)<u.size(); i=(i<50?i+1:i*3) )
347 for( int j=-i; size_t(i+j)<u.size(); j=(j<50?j+1:j*5) ) {
348 ASSERT( (u.begin()+i)[j].bar()==i+j, NULL );
349 ASSERT( (v.begin()+i)[j].bar()==i+j, NULL );
350 ASSERT((v.cbegin()+i)[j].bar()==i+j, NULL );
351 ASSERT( (i+u.begin())[j].bar()==i+j, NULL );
352 ASSERT( (i+v.begin())[j].bar()==i+j, NULL );
353 ASSERT((i+v.cbegin())[j].bar()==i+j, NULL );
354 }
355
356 CheckIteratorComparison<typename V::iterator, typename V::iterator>(v);
357 CheckIteratorComparison<typename V::iterator, typename V::const_iterator>(v);
358 CheckIteratorComparison<typename V::const_iterator, typename V::iterator>(v);
359 CheckIteratorComparison<typename V::const_iterator, typename V::const_iterator>(v);
360
361 TestIteratorAssignment<typename V::const_iterator>( u.begin() );
362 TestIteratorAssignment<typename V::const_iterator>( v.begin() );
363 TestIteratorAssignment<typename V::const_iterator>( v.cbegin() );
364 TestIteratorAssignment<typename V::iterator>( v.begin() );
365 // doesn't compile as expected: TestIteratorAssignment<typename V::iterator>( u.begin() );
366
367 TestRangeAssignment<typename V::const_range_type>( u.range() );
368 TestRangeAssignment<typename V::const_range_type>( v.range() );
369 TestRangeAssignment<typename V::range_type>( v.range() );
370 // doesn't compile as expected: TestRangeAssignment<typename V::range_type>( u.range() );
371
372 // Check reverse_iterator
373 typename V::reverse_iterator rp = v.rbegin();
374 for( size_t i=v.size(); i>0; --i, ++rp ) {
375 typename V::reference pref = *rp;
376 ASSERT( size_t(pref.bar())==i-1, NULL );
377 ASSERT( rp!=v.rend(), NULL );
378 }
379 ASSERT( rp==v.rend(), NULL );
380
381 // Check const_reverse_iterator
382 typename V::const_reverse_iterator crp = u.rbegin();
383 ASSERT( crp == v.crbegin(), NULL );
384 ASSERT( *crp == v.back(), NULL);
385 for( size_t i=v.size(); i>0; --i, ++crp ) {
386 typename V::const_reference cpref = *crp;
387 ASSERT( size_t(cpref.bar())==i-1, NULL );
388 ASSERT( crp!=u.rend(), NULL );
389 }
390 ASSERT( crp == u.rend(), NULL );
391 ASSERT( crp == v.crend(), NULL );
392
393 TestIteratorAssignment<typename V::const_reverse_iterator>( u.rbegin() );
394 TestIteratorAssignment<typename V::reverse_iterator>( v.rbegin() );
395
396 // test compliance with C++ Standard 2003, clause 23.1.1p9
397 {
398 tbb::concurrent_vector<int> v1, v2(1, 100);
399 v1.assign(1, 100); ASSERT(v1 == v2, NULL);
400 ASSERT(v1.size() == 1 && v1[0] == 100, "used integral iterators");
401 }
402
403 // cross-allocator tests
404#if !defined(_WIN64) || defined(_CPPLIB_VER)
405 typedef local_counting_allocator<std::allocator<int>, size_t> allocator1_t;
406 typedef tbb::cache_aligned_allocator<int> allocator2_t;
407 typedef tbb::concurrent_vector<FooWithAssign, allocator1_t> V1;
408 typedef tbb::concurrent_vector<FooWithAssign, allocator2_t> V2;
409 V1 v1( v ); // checking cross-allocator copying
410 V2 v2( 10 ); v2 = v1; // checking cross-allocator assignment
411 ASSERT( (v1 == v) && !(v2 != v), NULL);
412 ASSERT( !(v1 < v) && !(v2 > v), NULL);
413 ASSERT( (v1 <= v) && (v2 >= v), NULL);
414#endif
415}
416
417namespace test_grow_to_at_least_helpers {
418 template<typename MyVector >
419 class GrowToAtLeast: NoAssign {
420 typedef typename MyVector::const_reference const_reference;
421
422 const bool my_use_two_args_form ;
423 MyVector& my_vector;
424 const_reference my_init_from;
425 public:
426 void operator()( const tbb::blocked_range<size_t>& range ) const {
427 for( size_t i=range.begin(); i!=range.end(); ++i ) {
428 size_t n = my_vector.size();
429 size_t req = (i % (2*n+1))+1;
430
431 typename MyVector::iterator p;
432 Foo::State desired_state;
433 if (my_use_two_args_form){
434 p = my_vector.grow_to_at_least(req,my_init_from);
435 desired_state = Foo::CopyInitialized;
436 }else{
437 p = my_vector.grow_to_at_least(req);
438 desired_state = Foo::DefaultInitialized;
439 }
440 if( p-my_vector.begin() < typename MyVector::difference_type(req) )
441 ASSERT( p->state == desired_state || p->state == Foo::ZeroInitialized, NULL );
442 ASSERT( my_vector.size()>=req, NULL );
443 }
444 }
445 GrowToAtLeast(bool use_two_args_form, MyVector& vector, const_reference init_from )
446 : my_use_two_args_form(use_two_args_form), my_vector(vector), my_init_from(init_from) {}
447 };
448}
449
450template<bool use_two_arg_form>
451void TestConcurrentGrowToAtLeastImpl() {
452 using namespace test_grow_to_at_least_helpers;
453 typedef static_counting_allocator< tbb::zero_allocator<Foo> > MyAllocator;
454 typedef tbb::concurrent_vector<Foo, MyAllocator> MyVector;
455 Foo copy_from;
456 MyAllocator::init_counters();
457 MyVector v(2, Foo(), MyAllocator());
458 for( size_t s=1; s<1000; s*=10 ) {
459 tbb::parallel_for( tbb::blocked_range<size_t>(0,10000*s,s), GrowToAtLeast<MyVector>(use_two_arg_form, v, copy_from), tbb::simple_partitioner() );
460 }
461 v.clear();
462 ASSERT( 0 == v.get_allocator().frees, NULL);
463 v.shrink_to_fit();
464 size_t items_allocated = v.get_allocator().items_allocated,
465 items_freed = v.get_allocator().items_freed;
466 size_t allocations = v.get_allocator().allocations,
467 frees = v.get_allocator().frees;
468 ASSERT( items_allocated == items_freed, NULL);
469 ASSERT( allocations == frees, NULL);
470}
471
472void TestConcurrentGrowToAtLeast() {
473 TestConcurrentGrowToAtLeastImpl<false>();
474 TestConcurrentGrowToAtLeastImpl<true>();
475}
476
477struct grain_map: NoAssign {
478 enum grow_method_enum {
479 grow_by_range = 1,
480 grow_by_default,
481 grow_by_copy,
482 grow_by_init_list,
483 push_back,
484 push_back_move,
485 emplace_back,
486 last_method
487 };
488
489 struct range_part {
490 size_t number_of_parts;
491 grain_map::grow_method_enum method;
492 bool distribute;
493 Foo::State expected_element_state;
494 };
495
496 const std::vector<range_part> distributed;
497 const std::vector<range_part> batched;
498 const size_t total_number_of_parts;
499
500 grain_map(const range_part* begin, const range_part* end)
501 : distributed(separate(begin,end, &distributed::is_not))
502 , batched(separate(begin,end, &distributed::is_yes))
503 , total_number_of_parts(std::accumulate(begin, end, (size_t)0, &sum_number_of_parts::sum))
504 {}
505
506private:
507 struct sum_number_of_parts{
508 static size_t sum(size_t accumulator, grain_map::range_part const& rp){ return accumulator + rp.number_of_parts;}
509 };
510
511 template <typename functor_t>
512 static std::vector<range_part> separate(const range_part* begin, const range_part* end, functor_t f){
513 std::vector<range_part> part;
514 part.reserve(std::distance(begin,end));
515 //copy all that false==f(*it)
516 std::remove_copy_if(begin, end, std::back_inserter(part), f);
517
518 return part;
519 }
520
521 struct distributed {
522 static bool is_not(range_part const& rp){ return !rp.distribute;}
523 static bool is_yes(range_part const& rp){ return rp.distribute;}
524 };
525};
526
527//! Test concurrent invocations of method concurrent_vector::grow_by
528template<typename MyVector>
529class GrowBy: NoAssign {
530 MyVector& my_vector;
531 const grain_map& my_grain_map;
532 size_t my_part_weight;
533public:
534 void operator()( const tbb::blocked_range<size_t>& range ) const {
535 ASSERT( range.begin() < range.end(), NULL );
536
537 size_t current_adding_index_in_cvector = range.begin();
538
539 for(size_t index=0; index < my_grain_map.batched.size(); ++index){
540 const grain_map::range_part& batch_part = my_grain_map.batched[index];
541 const size_t number_of_items_to_add = batch_part.number_of_parts * my_part_weight;
542 const size_t end = current_adding_index_in_cvector + number_of_items_to_add;
543
544 switch(batch_part.method){
545 case grain_map::grow_by_range : {
546 my_vector.grow_by(FooIterator(current_adding_index_in_cvector),FooIterator(end));
547 } break;
548 case grain_map::grow_by_default : {
549 typename MyVector::iterator const s = my_vector.grow_by(number_of_items_to_add);
550 for( size_t k = 0; k < number_of_items_to_add; ++k )
551 s[k].bar() = current_adding_index_in_cvector + k;
552 } break;
553#if __TBB_INITIALIZER_LISTS_PRESENT
554 case grain_map::grow_by_init_list : {
555 FooIterator curr(current_adding_index_in_cvector);
556 for ( size_t k = 0; k < number_of_items_to_add; ++k ) {
557 if ( k + 4 < number_of_items_to_add ) {
558 my_vector.grow_by( { *curr++, *curr++, *curr++, *curr++, *curr++ } );
559 k += 4;
560 } else {
561 my_vector.grow_by( { *curr++ } );
562 }
563 }
564 ASSERT( curr == FooIterator(end), NULL );
565 } break;
566#endif
567 default : { ASSERT(false, "using unimplemented method of batch add in ConcurrentGrow test.");} break;
568 };
569
570 current_adding_index_in_cvector = end;
571 }
572
573 std::vector<size_t> items_left_to_add(my_grain_map.distributed.size());
574 for (size_t i=0; i<my_grain_map.distributed.size(); ++i ){
575 items_left_to_add[i] = my_grain_map.distributed[i].number_of_parts * my_part_weight;
576 }
577
578 for (;current_adding_index_in_cvector < range.end(); ++current_adding_index_in_cvector){
579 size_t method_index = current_adding_index_in_cvector % my_grain_map.distributed.size();
580
581 if (! items_left_to_add[method_index]) {
582 struct not_zero{
583 static bool is(size_t items_to_add){ return items_to_add;}
584 };
585 method_index = std::distance(items_left_to_add.begin(), std::find_if(items_left_to_add.begin(), items_left_to_add.end(), &not_zero::is));
586 ASSERT(method_index < my_grain_map.distributed.size(), "incorrect test setup - wrong expected distribution: left free space but no elements to add?");
587 };
588
589 ASSERT(items_left_to_add[method_index], "logic error ?");
590 const grain_map::range_part& distributed_part = my_grain_map.distributed[method_index];
591
592 typename MyVector::iterator r;
593 typename MyVector::value_type source;
594 source.bar() = current_adding_index_in_cvector;
595
596 switch(distributed_part.method){
597 case grain_map::grow_by_default : {
598 (r = my_vector.grow_by(1))->bar() = current_adding_index_in_cvector;
599 } break;
600 case grain_map::grow_by_copy : {
601 r = my_vector.grow_by(1, source);
602 } break;
603 case grain_map::push_back : {
604 r = my_vector.push_back(source);
605 } break;
606#if __TBB_CPP11_RVALUE_REF_PRESENT
607 case grain_map::push_back_move : {
608 r = my_vector.push_back(std::move(source));
609 } break;
610#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
611 case grain_map::emplace_back : {
612 r = my_vector.emplace_back(current_adding_index_in_cvector);
613 } break;
614#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
615#endif //__TBB_CPP11_RVALUE_REF_PRESENT
616
617 default : { ASSERT(false, "using unimplemented method of batch add in ConcurrentGrow test.");} break;
618 };
619
620 ASSERT( static_cast<size_t>(r->bar()) == current_adding_index_in_cvector, NULL );
621 }
622 }
623
624 GrowBy( MyVector& vector, const grain_map& m, size_t part_weight )
625 : my_vector(vector)
626 , my_grain_map(m)
627 , my_part_weight(part_weight)
628 {
629 }
630};
631
632const grain_map::range_part concurrent_grow_single_range_map [] = {
633// number_of_parts, method, distribute, expected_element_state
634 {3, grain_map::grow_by_range, false,
635 #if __TBB_CPP11_RVALUE_REF_PRESENT
636 Foo::MoveInitialized
637 #else
638 Foo::CopyInitialized
639 #endif
640 },
641#if __TBB_INITIALIZER_LISTS_PRESENT && !__TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
642 {1, grain_map::grow_by_init_list, false, Foo::CopyInitialized},
643#endif
644 {2, grain_map::grow_by_default, false, Foo::DefaultInitialized},
645 {1, grain_map::grow_by_default, true, Foo::DefaultInitialized},
646 {1, grain_map::grow_by_copy, true, Foo::CopyInitialized},
647 {1, grain_map::push_back, true, Foo::CopyInitialized},
648#if __TBB_CPP11_RVALUE_REF_PRESENT
649 {1, grain_map::push_back_move, true, Foo::MoveInitialized},
650#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
651 {1, grain_map::emplace_back, true, Foo::DirectInitialized},
652#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
653#endif //__TBB_CPP11_RVALUE_REF_PRESENT
654};
655
656//! Test concurrent invocations of grow methods
657void TestConcurrentGrowBy( int nthread ) {
658
659 typedef static_counting_allocator<debug_allocator<Foo> > MyAllocator;
660 typedef tbb::concurrent_vector<Foo, MyAllocator> MyVector;
661
662#if __TBB_INITIALIZER_LISTS_PRESENT && __TBB_CPP11_INIT_LIST_TEMP_OBJS_LIFETIME_BROKEN
663 static bool is_reported = false;
664 if ( !is_reported ) {
665 REPORT( "Known issue: concurrent tests of grow_by(std::initializer_list) are skipped.\n" );
666 is_reported = true;
667 }
668#endif
669
670 MyAllocator::init_counters();
671 {
672 grain_map m(concurrent_grow_single_range_map, Harness::end(concurrent_grow_single_range_map));
673
674 static const size_t desired_grain_size = 100;
675
676 static const size_t part_weight = desired_grain_size / m.total_number_of_parts;
677 static const size_t grain_size = part_weight * m.total_number_of_parts;
678 static const size_t number_of_grains = 8; //this should be (power of two) in order to get minimal ranges equal to grain_size
679 static const size_t range_size = grain_size * number_of_grains;
680
681 MyAllocator a;
682 MyVector v( a );
683 tbb::parallel_for( tbb::blocked_range<size_t>(0,range_size,grain_size), GrowBy<MyVector>(v, m, part_weight), tbb::simple_partitioner() );
684 ASSERT( v.size()==size_t(range_size), NULL );
685
686 // Verify that v is a permutation of 0..m
687 size_t inversions = 0, direct_inits = 0, def_inits = 0, copy_inits = 0, move_inits = 0;
688 std::vector<bool> found(range_size, 0);
689 for( size_t i=0; i<range_size; ++i ) {
690 if( v[i].state == Foo::DefaultInitialized ) ++def_inits;
691 else if( v[i].state == Foo::DirectInitialized ) ++direct_inits;
692 else if( v[i].state == Foo::CopyInitialized ) ++copy_inits;
693 else if( v[i].state == Foo::MoveInitialized ) ++move_inits;
694 else {
695 REMARK("i: %d ", i);
696 ASSERT( false, "v[i] seems not initialized");
697 }
698 intptr_t index = v[i].bar();
699 ASSERT( !found[index], NULL );
700 found[index] = true;
701 if( i>0 )
702 inversions += v[i].bar()<v[i-1].bar();
703 }
704 for( size_t i=0; i<range_size; ++i ) {
705 ASSERT( found[i], NULL );
706 ASSERT( nthread>1 || v[i].bar() == static_cast<intptr_t>(i), "sequential execution is wrong" );
707 }
708
709 REMARK("Initialization by default constructor: %d, by copy: %d, by move: %d\n", def_inits, copy_inits, move_inits);
710
711 size_t expected_direct_inits = 0, expected_def_inits = 0, expected_copy_inits = 0, expected_move_inits = 0;
712 for (size_t i=0; i<Harness::array_length(concurrent_grow_single_range_map); ++i){
713 const grain_map::range_part& rp =concurrent_grow_single_range_map[i];
714 switch (rp.expected_element_state){
715 case Foo::DefaultInitialized: { expected_def_inits += rp.number_of_parts ; } break;
716 case Foo::DirectInitialized: { expected_direct_inits += rp.number_of_parts ;} break;
717 case Foo::MoveInitialized: { expected_move_inits += rp.number_of_parts ;} break;
718 case Foo::CopyInitialized: { expected_copy_inits += rp.number_of_parts ;} break;
719 default: {ASSERT(false, "unexpected expected state");}break;
720 };
721 }
722
723 expected_def_inits *= part_weight * number_of_grains;
724 expected_move_inits *= part_weight * number_of_grains;
725 expected_copy_inits *= part_weight * number_of_grains;
726 expected_direct_inits *= part_weight * number_of_grains;
727
728 ASSERT( def_inits == expected_def_inits , NULL);
729 ASSERT( copy_inits == expected_copy_inits , NULL);
730 ASSERT( move_inits == expected_move_inits , NULL);
731 ASSERT( direct_inits == expected_direct_inits , NULL);
732
733 if( nthread>1 && inversions<range_size/20 )
734 REPORT("Warning: not much concurrency in TestConcurrentGrowBy (%d inversions)\n", inversions);
735 }
736 //TODO: factor this into separate thing, as it seems to used in big number of tests
737 size_t items_allocated = MyAllocator::items_allocated,
738 items_freed = MyAllocator::items_freed;
739 size_t allocations = MyAllocator::allocations,
740 frees = MyAllocator::frees;
741 ASSERT( items_allocated == items_freed, NULL);
742 ASSERT( allocations == frees, NULL);
743}
744
745template <typename Vector>
746void test_grow_by_empty_range( Vector &v, typename Vector::value_type* range_begin_end ) {
747 const Vector v_copy = v;
748 ASSERT( v.grow_by( range_begin_end, range_begin_end ) == v.end(), "grow_by(empty_range) returned a wrong iterator." );
749 ASSERT( v == v_copy, "grow_by(empty_range) has changed the vector." );
750}
751
752void TestSerialGrowByRange( bool fragmented_vector ) {
753 tbb::concurrent_vector<int> v;
754 if ( fragmented_vector ) {
755 v.reserve( 1 );
756 }
757 int init_range[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
758 ASSERT( v.grow_by( init_range, init_range + (Harness::array_length( init_range )) ) == v.begin(), "grow_by(I,I) returned a wrong iterator." );
759 ASSERT( std::equal( v.begin(), v.end(), init_range ), "grow_by(I,I) did not properly copied all elements ?" );
760 test_grow_by_empty_range( v, init_range );
761 test_grow_by_empty_range( v, (int*)NULL );
762}
763
764//TODO: move this to more appropriate place, smth like test_harness.cpp
765void TestArrayLength(){
766 int five_element_array[5] = {0};
767 ASSERT(Harness::array_length(five_element_array)==5,"array_length failed to determine length of non empty non dynamic array");
768}
769
770#if __TBB_INITIALIZER_LISTS_PRESENT
771#include "test_initializer_list.h"
772
773struct test_grow_by {
774 template<typename container_type, typename element_type>
775 static void do_test( std::initializer_list<element_type> const& il, container_type const& expected ) {
776 container_type vd;
777 vd.grow_by( il );
778 ASSERT( vd == expected, "grow_by with an initializer list failed" );
779 }
780};
781
782void TestInitList() {
783 REMARK( "testing initializer_list methods \n" );
784 using namespace initializer_list_support_tests;
785 TestInitListSupport<tbb::concurrent_vector<char>, test_grow_by>( { 1, 2, 3, 4, 5 } );
786 TestInitListSupport<tbb::concurrent_vector<int>, test_grow_by>( {} );
787}
788#endif //if __TBB_INITIALIZER_LISTS_PRESENT
789
790#if __TBB_RANGE_BASED_FOR_PRESENT
791#include "test_range_based_for.h"
792
793void TestRangeBasedFor(){
794 using namespace range_based_for_support_tests;
795
796 REMARK("testing range based for loop compatibility \n");
797 typedef tbb::concurrent_vector<int> c_vector;
798 c_vector a_c_vector;
799
800 const int sequence_length = 100;
801 for (int i =1; i<= sequence_length; ++i){
802 a_c_vector.push_back(i);
803 }
804
805 ASSERT( range_based_for_accumulate(a_c_vector, std::plus<int>(), 0) == gauss_summ_of_int_sequence(sequence_length), "incorrect accumulated value generated via range based for ?");
806}
807#endif //if __TBB_RANGE_BASED_FOR_PRESENT
808
809#if TBB_USE_EXCEPTIONS
810#endif //TBB_USE_EXCEPTIONS
811
812#if __TBB_CPP11_RVALUE_REF_PRESENT
813namespace move_semantics_helpers{
814 struct move_only_type:NoCopy{
815 const int* my_pointer;
816 move_only_type(move_only_type && other): my_pointer(other.my_pointer){other.my_pointer=NULL;}
817 explicit move_only_type(const int* value): my_pointer(value) {}
818 };
819}
820
821void TestPushBackMoveOnlyContainee(){
822 using namespace move_semantics_helpers;
823 typedef tbb::concurrent_vector<move_only_type > vector_t;
824 vector_t v;
825 static const int magic_number =7;
826 move_only_type src(&magic_number);
827 v.push_back(std::move(src));
828 ASSERT(v[0].my_pointer == &magic_number,"item was incorrectly moved during push_back?");
829 ASSERT(src.my_pointer == NULL,"item was incorrectly moved during push_back?");
830}
831
832namespace emplace_helpers{
833 struct wrapper_type:NoCopy{
834 int value1;
835 int value2;
836 explicit wrapper_type(int v1, int v2) : value1 (v1), value2(v2) {}
837 friend bool operator==(const wrapper_type& lhs, const wrapper_type& rhs){
838 return (lhs.value1 == rhs.value1) && (lhs.value2 == rhs.value2 );
839 }
840 };
841}
842#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
843//TODO: extend the test to number of types e.g. std::string
844void TestEmplaceBack(){
845 using namespace emplace_helpers;
846 typedef tbb::concurrent_vector<wrapper_type > vector_t;
847 vector_t v;
848 v.emplace_back(1,2);
849 ASSERT(v[0] == wrapper_type(1,2),"incorrectly in-place constructed item during emplace_back?");
850}
851#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
852#endif //__TBB_CPP11_RVALUE_REF_PRESENT
853
854//! Test the assignment operator and swap
855void TestAssign() {
856 typedef tbb::concurrent_vector<FooWithAssign, local_counting_allocator<std::allocator<FooWithAssign>, size_t > > vector_t;
857 local_counting_allocator<std::allocator<FooWithAssign>, size_t > init_alloc;
858 init_alloc.allocations = 100;
859 for( int dst_size=1; dst_size<=128; NextSize( dst_size ) ) {
860 for( int src_size=2; src_size<=128; NextSize( src_size ) ) {
861 vector_t u(FooIterator(0), FooIterator(src_size), init_alloc);
862 for( int i=0; i<src_size; ++i )
863 ASSERT( u[i].bar()==i, NULL );
864 vector_t v(dst_size, FooWithAssign(), init_alloc);
865 for( int i=0; i<dst_size; ++i ) {
866 ASSERT( v[i].state==Foo::CopyInitialized, NULL );
867 v[i].bar() = ~i;
868 }
869 ASSERT( v != u, NULL);
870 v.swap(u);
871 CheckVector(u, dst_size, src_size);
872 u.swap(v);
873 // using assignment
874 v = u;
875 ASSERT( v == u, NULL);
876 u.clear();
877 ASSERT( u.size()==0, NULL );
878 ASSERT( v.size()==size_t(src_size), NULL );
879 for( int i=0; i<src_size; ++i )
880 ASSERT( v[i].bar()==i, NULL );
881 ASSERT( 0 == u.get_allocator().frees, NULL);
882 u.shrink_to_fit(); // deallocate unused memory
883 size_t items_allocated = u.get_allocator().items_allocated,
884 items_freed = u.get_allocator().items_freed;
885 size_t allocations = u.get_allocator().allocations,
886 frees = u.get_allocator().frees + 100;
887 ASSERT( items_allocated == items_freed, NULL);
888 ASSERT( allocations == frees, NULL);
889 }
890 }
891}
892
893struct c_vector_type : default_container_traits {
894 template<typename element_type, typename allocator_type>
895 struct apply{
896 typedef tbb::concurrent_vector<element_type, allocator_type > type;
897 };
898
899 typedef FooIterator init_iterator_type;
900 enum{ expected_number_of_items_to_allocate_for_steal_move = 0 };
901
902 template<typename element_type, typename allocator_type, typename iterator>
903 static bool equal(tbb::concurrent_vector<element_type, allocator_type > const& c, iterator begin, iterator end){
904 bool equal_sizes = (size_t)std::distance(begin, end) == c.size();
905 return equal_sizes && std::equal(c.begin(), c.end(), begin);
906 }
907};
908
909#if __TBB_CPP11_RVALUE_REF_PRESENT
910void TestSerialGrowByWithMoveIterators(){
911 typedef default_stateful_fixture_make_helper<c_vector_type>::type fixture_t;
912 typedef fixture_t::container_t vector_t;
913
914 fixture_t fixture("TestSerialGrowByWithMoveIterators");
915
916 vector_t dst(fixture.dst_allocator);
917 dst.grow_by(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end()));
918
919 fixture.verify_content_deep_moved(dst);
920}
921
922#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
923namespace test_move_in_shrink_to_fit_helpers {
924 struct dummy : Harness::StateTrackable<>{
925 int i;
926 dummy(int an_i) __TBB_NOEXCEPT(true) : Harness::StateTrackable<>(0), i(an_i) {}
927#if !__TBB_IMPLICIT_MOVE_PRESENT || __TBB_NOTHROW_MOVE_MEMBERS_IMPLICIT_GENERATION_BROKEN
928 dummy(const dummy &src) __TBB_NOEXCEPT(true) : Harness::StateTrackable<>(src), i(src.i) {}
929 dummy(dummy &&src) __TBB_NOEXCEPT(true) : Harness::StateTrackable<>(std::move(src)), i(src.i) {}
930
931 dummy& operator=(dummy &&src) __TBB_NOEXCEPT(true) {
932 Harness::StateTrackable<>::operator=(std::move(src));
933 i = src.i;
934 return *this;
935 }
936
937 //somehow magically this declaration make std::is_nothrow_move_constructible<pod>::value to works correctly on icc14+msvc2013
938 ~dummy() __TBB_NOEXCEPT(true) {}
939#endif //!__TBB_IMPLICIT_MOVE_PRESENT || __TBB_NOTHROW_MOVE_MEMBERS_IMPLICIT_GENERATION_BROKEN
940 friend bool operator== (const dummy &lhs, const dummy &rhs){ return lhs.i == rhs.i; }
941 };
942}
943void TestSerialMoveInShrinkToFit(){
944 const char* test_name = "TestSerialMoveInShrinkToFit";
945 REMARK("running %s \n", test_name);
946 using test_move_in_shrink_to_fit_helpers::dummy;
947
948 __TBB_STATIC_ASSERT(std::is_nothrow_move_constructible<dummy>::value,"incorrect test setup or broken configuration?");
949 {
950 dummy src(0);
951 ASSERT_IN_TEST(is_state<Harness::StateTrackableBase::MoveInitialized>(dummy(std::move_if_noexcept(src))),"broken configuration ?", test_name);
952 }
953 static const size_t sequence_size = 15;
954 typedef tbb::concurrent_vector<dummy> c_vector_t;
955 std::vector<dummy> source(sequence_size, 0);
956 std::generate_n(source.begin(), source.size(), std::rand);
957
958 c_vector_t c_vector;
959 c_vector.reserve(1); //make it fragmented
960
961 c_vector.assign(source.begin(), source.end());
962 memory_locations c_vector_before_shrink(c_vector);
963 c_vector.shrink_to_fit();
964
965 ASSERT_IN_TEST(c_vector_before_shrink.content_location_changed(c_vector), "incorrect test setup? shrink_to_fit should cause moving elements to other memory locations while it is not", test_name);
966 ASSERT_IN_TEST(all_of(c_vector, is_state_f<Harness::StateTrackableBase::MoveInitialized>()), "container did not move construct some elements?", test_name);
967 ASSERT_IN_TEST(c_vector == c_vector_t(source.begin(),source.end()),"",test_name);
968}
969#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
970#endif //__TBB_CPP11_RVALUE_REF_PRESENT
971
972#include <string>
973
974// Test the comparison operators
975void TestComparison() {
976 std::string str[3]; str[0] = "abc";
977 str[1].assign("cba");
978 str[2].assign("abc"); // same as 0th
979 tbb::concurrent_vector<char> var[3];
980 var[0].assign(str[0].begin(), str[0].end());
981 var[1].assign(str[0].rbegin(), str[0].rend());
982 var[2].assign(var[1].rbegin(), var[1].rend()); // same as 0th
983 for (int i = 0; i < 3; ++i) {
984 for (int j = 0; j < 3; ++j) {
985 ASSERT( (var[i] == var[j]) == (str[i] == str[j]), NULL );
986 ASSERT( (var[i] != var[j]) == (str[i] != str[j]), NULL );
987 ASSERT( (var[i] < var[j]) == (str[i] < str[j]), NULL );
988 ASSERT( (var[i] > var[j]) == (str[i] > str[j]), NULL );
989 ASSERT( (var[i] <= var[j]) == (str[i] <= str[j]), NULL );
990 ASSERT( (var[i] >= var[j]) == (str[i] >= str[j]), NULL );
991 }
992 }
993}
994
995//------------------------------------------------------------------------
996// Regression test for problem where on oversubscription caused
997// concurrent_vector::grow_by to run very slowly (TR#196).
998//------------------------------------------------------------------------
999
1000#include "tbb/task_scheduler_init.h"
1001#include <math.h>
1002
1003typedef unsigned long Number;
1004
1005static tbb::concurrent_vector<Number> Primes;
1006
1007class FindPrimes {
1008 bool is_prime( Number val ) const {
1009 int limit, factor = 3;
1010 if( val<5u )
1011 return val==2;
1012 else {
1013 limit = long(sqrtf(float(val))+0.5f);
1014 while( factor<=limit && val % factor )
1015 ++factor;
1016 return factor>limit;
1017 }
1018 }
1019public:
1020 void operator()( const tbb::blocked_range<Number>& r ) const {
1021 for( Number i=r.begin(); i!=r.end(); ++i ) {
1022 if( i%2 && is_prime(i) ) {
1023 Primes.push_back( i );
1024 }
1025 }
1026 }
1027};
1028
1029double TimeFindPrimes( int nthread ) {
1030 Primes.clear();
1031 Primes.reserve(1000000);// TODO: or compact()?
1032 tbb::task_scheduler_init init(nthread);
1033 tbb::tick_count t0 = tbb::tick_count::now();
1034 tbb::parallel_for( tbb::blocked_range<Number>(0,1000000,500), FindPrimes() );
1035 tbb::tick_count t1 = tbb::tick_count::now();
1036 return (t1-t0).seconds();
1037}
1038
1039void TestFindPrimes() {
1040 // Time fully subscribed run.
1041 double t2 = TimeFindPrimes( tbb::task_scheduler_init::automatic );
1042
1043 // Time parallel run that is very likely oversubscribed.
1044 double t128 = TimeFindPrimes(128);
1045 REMARK("TestFindPrimes: t2==%g t128=%g k=%g\n", t2, t128, t128/t2);
1046
1047 // We allow the 128-thread run a little extra time to allow for thread overhead.
1048 // Theoretically, following test will fail on machine with >128 processors.
1049 // But that situation is not going to come up in the near future,
1050 // and the generalization to fix the issue is not worth the trouble.
1051 if( t128 > 1.3*t2 ) {
1052 REPORT("Warning: grow_by is pathetically slow: t2==%g t128=%g k=%g\n", t2, t128, t128/t2);
1053 }
1054}
1055
1056//------------------------------------------------------------------------
1057// Test compatibility with STL sort.
1058//------------------------------------------------------------------------
1059
1060#include <algorithm>
1061
1062void TestSort() {
1063 for( int n=0; n<100; n=n*3+1 ) {
1064 tbb::concurrent_vector<int> array(n);
1065 for( int i=0; i<n; ++i )
1066 array.at(i) = (i*7)%n;
1067 std::sort( array.begin(), array.end() );
1068 for( int i=0; i<n; ++i )
1069 ASSERT( array[i]==i, NULL );
1070 }
1071}
1072
1073#if TBB_USE_EXCEPTIONS
1074
1075template<typename c_vector>
1076size_t get_early_size(c_vector & v){
1077 return v.grow_by(0) - v.begin();
1078}
1079
1080void verify_c_vector_size(size_t size, size_t capacity, size_t early_size, const char * const test_name){
1081 ASSERT_IN_TEST( size <= capacity, "", test_name);
1082 ASSERT_IN_TEST( early_size >= size, "", test_name);
1083}
1084
1085template<typename c_vector_t>
1086void verify_c_vector_size(c_vector_t & c_v, const char * const test_name){
1087 verify_c_vector_size(c_v.size(), c_v.capacity(), get_early_size(c_v), test_name);
1088}
1089
1090void verify_c_vector_capacity_is_below(size_t capacity, size_t high, const char * const test_name){
1091 ASSERT_IN_TEST(capacity > 0, "unexpected capacity", test_name);
1092 ASSERT_IN_TEST(capacity < high, "unexpected capacity", test_name);
1093}
1094
1095template<typename vector_t>
1096void verify_last_segment_allocation_failed(vector_t const& victim, const char* const test_name){
1097 ASSERT_THROWS_IN_TEST(victim.at(victim.size()), std::range_error, "",test_name );
1098}
1099
1100template<typename vector_t>
1101void verify_assignment_operator_throws_bad_last_alloc(vector_t & victim, const char* const test_name){
1102 vector_t copy_of_victim(victim, victim.get_allocator());
1103 ASSERT_THROWS_IN_TEST(victim = copy_of_victim, tbb::bad_last_alloc, "", test_name);
1104}
1105
1106template<typename vector_t>
1107void verify_copy_and_assign_from_produce_the_same(vector_t const& victim, const char* const test_name){
1108 //TODO: remove explicit copy of allocator when full support of C++11 allocator_traits in concurrent_vector is present
1109 vector_t copy_of_victim(victim, victim.get_allocator());
1110 ASSERT_IN_TEST(copy_of_victim == victim, "copy doesn't match original", test_name);
1111 vector_t copy_of_victim2(10, victim[0], victim.get_allocator());
1112 copy_of_victim2 = victim;
1113 ASSERT_IN_TEST(copy_of_victim == copy_of_victim2, "assignment doesn't match copying", test_name);
1114}
1115
1116template<typename allocator_t>
1117void verify_vector_partially_copied(
1118 tbb::concurrent_vector<FooWithAssign, allocator_t> const& victim, size_t planned_victim_size,
1119 tbb::concurrent_vector<FooWithAssign, allocator_t> const& src, bool is_memory_allocation_failure ,const char* const test_name)
1120{
1121 if (is_memory_allocation_failure) { // allocator generated exception
1122 typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t;
1123 ASSERT_IN_TEST( victim == vector_t(src.begin(), src.begin() + victim.size(), src.get_allocator()), "failed to properly copy of source ?", test_name );
1124 }else{
1125 ASSERT_IN_TEST( std::equal(victim.begin(), victim.begin() + planned_victim_size, src.begin()), "failed to properly copy items before the exception?", test_name );
1126 ASSERT_IN_TEST( ::all_of( victim.begin() + planned_victim_size, victim.end(), is_state_f<Foo::ZeroInitialized>() ), "failed to zero-initialize items left not constructed after the exception?", test_name );
1127 }
1128}
1129
1130//------------------------------------------------------------------------
1131// Test exceptions safety (from allocator and items constructors)
1132//------------------------------------------------------------------------
1133void TestExceptions() {
1134 typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t;
1135 typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t;
1136
1137 enum methods {
1138 zero_method = 0,
1139 ctor_copy, ctor_size, assign_nt, assign_ir, reserve, compact,
1140 all_methods
1141 };
1142 ASSERT( !FooCount, NULL );
1143
1144 try {
1145 vector_t src(FooIterator(0), FooIterator(N)); // original data
1146
1147 for(int t = 0; t < 2; ++t) // exception type
1148 for(int m = zero_method+1; m < all_methods; ++m)
1149 {
1150 track_foo_count<__LINE__> check_all_foo_destroyed_on_exit("TestExceptions");
1151 track_allocator_memory<allocator_t> verify_no_leak_at_exit("TestExceptions");
1152 allocator_t::init_counters();
1153 if(t) MaxFooCount = FooCount + N/4;
1154 else allocator_t::set_limits(N/4);
1155 vector_t victim;
1156 try {
1157 switch(m) {
1158 case ctor_copy: {
1159 vector_t acopy(src);
1160 } break; // auto destruction after exception is checked by ~Foo
1161 case ctor_size: {
1162 vector_t sized(N);
1163 } break; // auto destruction after exception is checked by ~Foo
1164 // Do not test assignment constructor due to reusing of same methods as below
1165 case assign_nt: {
1166 victim.assign(N, FooWithAssign());
1167 } break;
1168 case assign_ir: {
1169 victim.assign(FooIterator(0), FooIterator(N));
1170 } break;
1171 case reserve: {
1172 try {
1173 victim.reserve(victim.max_size()+1);
1174 } catch(std::length_error &) {
1175 } catch(...) {
1176 KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n");
1177 }
1178 victim.reserve(N);
1179 } break;
1180 case compact: {
1181 if(t) MaxFooCount = 0; else allocator_t::set_limits(); // reset limits
1182 victim.reserve(2); victim = src; // fragmented assignment
1183 if(t) MaxFooCount = FooCount + 10; else allocator_t::set_limits(1, false); // block any allocation, check NULL return from allocator
1184 victim.shrink_to_fit(); // should start defragmenting first segment
1185 } break;
1186 default:;
1187 }
1188 if(!t || m != reserve) ASSERT(false, "should throw an exception");
1189 } catch(std::bad_alloc &e) {
1190 allocator_t::set_limits(); MaxFooCount = 0;
1191 size_t capacity = victim.capacity();
1192 size_t size = victim.size();
1193
1194 size_t req_size = get_early_size(victim);
1195
1196 verify_c_vector_size(size, capacity, req_size, "TestExceptions");
1197
1198 switch(m) {
1199 case reserve:
1200 if(t) ASSERT(false, NULL);
1201 __TBB_fallthrough;
1202 case assign_nt:
1203 case assign_ir:
1204 if(!t) {
1205 ASSERT(capacity < N/2, "unexpected capacity");
1206 ASSERT(size == 0, "unexpected size");
1207 break;
1208 } else {
1209 ASSERT(size == N, "unexpected size");
1210 ASSERT(capacity >= N, "unexpected capacity");
1211 int i;
1212 for(i = 1; ; ++i)
1213 if(!victim[i].zero_bar()) break;
1214 else ASSERT(victim[i].bar() == (m == assign_ir? i : initial_value_of_bar), NULL);
1215 for(; size_t(i) < size; ++i) ASSERT(!victim[i].zero_bar(), NULL);
1216 ASSERT(size_t(i) == size, NULL);
1217 break;
1218 }
1219 case compact:
1220 ASSERT(capacity > 0, "unexpected capacity");
1221 ASSERT(victim == src, "shrink_to_fit() is broken");
1222 break;
1223
1224 default:; // nothing to check here
1225 }
1226 REMARK("Exception %d: %s\t- ok\n", m, e.what());
1227 }
1228 }
1229 } catch(...) {
1230 ASSERT(false, "unexpected exception");
1231 }
1232}
1233
1234//TODO: split into two separate tests
1235//TODO: remove code duplication in exception safety tests
1236void TestExceptionSafetyGuaranteesForAssignOperator(){
1237 //TODO: use __FUNCTION__ for test name
1238 const char* const test_name = "TestExceptionSafetyGuaranteesForAssignOperator";
1239 typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t;
1240 typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t;
1241
1242 track_foo_count<__LINE__> check_all_foo_destroyed_on_exit(test_name);
1243 track_allocator_memory<allocator_t> verify_no_leak_at_exit(test_name);
1244
1245 vector_t src(FooIterator(0), FooIterator(N)); // original data
1246
1247 const size_t planned_victim_size = N/4;
1248
1249 for(int t = 0; t < 2; ++t) {// exception type
1250 vector_t victim;
1251 victim.reserve(2); // get fragmented assignment
1252
1253 ASSERT_THROWS_IN_TEST(
1254 {
1255 limit_foo_count_in_scope foo_limit(FooCount + planned_victim_size, t);
1256 limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + planned_victim_size, !t);
1257
1258 victim = src; // fragmented assignment
1259 },
1260 std::bad_alloc, "", test_name
1261 );
1262
1263 verify_c_vector_size(victim, test_name);
1264
1265 if(!t) {
1266 verify_c_vector_capacity_is_below(victim.capacity(), N, test_name);
1267 }
1268
1269 verify_vector_partially_copied(victim, planned_victim_size, src, !t, test_name);
1270 verify_last_segment_allocation_failed(victim, test_name);
1271 verify_copy_and_assign_from_produce_the_same(victim, test_name);
1272 verify_assignment_operator_throws_bad_last_alloc(victim, test_name);
1273 }
1274}
1275//TODO: split into two separate tests
1276void TestExceptionSafetyGuaranteesForConcurrentGrow(){
1277 const char* const test_name = "TestExceptionSafetyGuaranteesForConcurrentGrow";
1278 typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t;
1279 typedef tbb::concurrent_vector<FooWithAssign, allocator_t> vector_t;
1280
1281 track_foo_count<__LINE__> check_all_foo_destroyed_on_exit(test_name);
1282 track_allocator_memory<allocator_t> verify_no_leak_at_exit(test_name);
1283
1284 vector_t src(FooIterator(0), FooIterator(N)); // original data
1285
1286 const size_t planned_victim_size = N/4;
1287 static const int grain_size = 70;
1288
1289 tbb::task_scheduler_init init(2);
1290
1291 for(int t = 0; t < 2; ++t) {// exception type
1292 vector_t victim;
1293
1294#if TBB_USE_CAPTURED_EXCEPTION
1295 #define EXPECTED_EXCEPTION tbb::captured_exception
1296#else
1297 #define EXPECTED_EXCEPTION std::bad_alloc
1298#endif
1299
1300 ASSERT_THROWS_IN_TEST(
1301 {
1302 limit_foo_count_in_scope foo_limit(FooCount + 31, t); // these numbers help to reproduce the live lock for versions < TBB2.2
1303 limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + planned_victim_size, !t);
1304
1305 grain_map m(concurrent_grow_single_range_map, Harness::end(concurrent_grow_single_range_map));
1306
1307 static const size_t part_weight = grain_size / m.total_number_of_parts;
1308
1309 tbb::parallel_for(
1310 tbb::blocked_range<size_t>(0, N, grain_size),
1311 GrowBy<vector_t>(victim, m, part_weight)
1312 );
1313 },
1314 EXPECTED_EXCEPTION, "", test_name
1315 );
1316
1317 verify_c_vector_size(victim, test_name);
1318
1319 if(!t) {
1320 verify_c_vector_capacity_is_below(victim.capacity(), N, test_name);
1321 }
1322
1323 for(int i = 0; ; ++i) {
1324 try {
1325 Foo &foo = victim.at(i);
1326 ASSERT( foo.is_valid_or_zero(),"" );
1327 } catch(std::range_error &) { // skip broken segment
1328 ASSERT( size_t(i) < get_early_size(victim), NULL );
1329 } catch(std::out_of_range &){
1330 ASSERT( i > 0, NULL ); break;
1331 } catch(...) {
1332 KNOWN_ISSUE("ERROR: unrecognized exception - known compiler issue\n"); break;
1333 }
1334 }
1335
1336 verify_copy_and_assign_from_produce_the_same(victim, test_name);
1337 }
1338}
1339
1340#if __TBB_CPP11_RVALUE_REF_PRESENT
1341void TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorMemoryFailure(){
1342 const char* const test_name = "TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorMemoryFailure";
1343
1344 //TODO: add ability to inject debug_allocator into stateful_allocator_fixture::allocator_t
1345 //typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t;
1346 typedef default_stateful_fixture_make_helper<c_vector_type, Harness::false_type>::type fixture_t;
1347 typedef arena_allocator_fixture<FooWithAssign, Harness::false_type> arena_allocator_fixture_t;
1348 typedef fixture_t::allocator_t allocator_t;
1349 typedef fixture_t::container_t vector_t;
1350
1351 fixture_t fixture(test_name);
1352 arena_allocator_fixture_t arena_allocator_fixture(4 * fixture.container_size);
1353
1354 const size_t allocation_limit = fixture.container_size/4;
1355
1356 vector_t victim(arena_allocator_fixture.allocator);
1357 victim.reserve(2); // get fragmented assignment
1358
1359 ASSERT_THROWS_IN_TEST(
1360 {
1361 limit_allocated_items_in_scope<allocator_t> allocator_limit(allocator_t::items_allocated + allocation_limit);
1362 victim = std::move(fixture.source); // fragmented assignment
1363 },
1364 std::bad_alloc, "", test_name
1365 );
1366
1367 verify_c_vector_size(victim, test_name);
1368 verify_c_vector_capacity_is_below(victim.capacity(), allocation_limit + 2, test_name);
1369
1370 fixture.verify_part_of_content_deep_moved(victim, victim.size());
1371
1372 verify_last_segment_allocation_failed(victim, test_name);
1373 verify_copy_and_assign_from_produce_the_same(victim, test_name);
1374 verify_assignment_operator_throws_bad_last_alloc(victim, test_name);
1375}
1376
1377void TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorExceptionInElementCtor(){
1378 const char* const test_name = "TestExceptionSafetyGuaranteesForMoveAssignOperator";
1379 //typedef static_counting_allocator<debug_allocator<FooWithAssign>, std::size_t> allocator_t;
1380 typedef default_stateful_fixture_make_helper<c_vector_type, Harness::false_type>::type fixture_t;
1381 typedef arena_allocator_fixture<FooWithAssign, Harness::false_type> arena_allocator_fixture_t;
1382 typedef fixture_t::container_t vector_t;
1383
1384 fixture_t fixture(test_name);
1385 const size_t planned_victim_size = fixture.container_size/4;
1386 arena_allocator_fixture_t arena_allocator_fixture(4 * fixture.container_size);
1387
1388 vector_t victim(arena_allocator_fixture.allocator);
1389 victim.reserve(2); // get fragmented assignment
1390
1391 ASSERT_THROWS_IN_TEST(
1392 {
1393 limit_foo_count_in_scope foo_limit(FooCount + planned_victim_size);
1394 victim = std::move(fixture.source); // fragmented assignment
1395 },
1396 std::bad_alloc, "", test_name
1397 );
1398
1399 verify_c_vector_size(victim, test_name);
1400
1401 fixture.verify_part_of_content_deep_moved(victim, planned_victim_size);
1402
1403 verify_last_segment_allocation_failed(victim, test_name);
1404 verify_copy_and_assign_from_produce_the_same(victim, test_name);
1405 verify_assignment_operator_throws_bad_last_alloc(victim, test_name);
1406}
1407#endif //__TBB_CPP11_RVALUE_REF_PRESENT
1408
1409namespace push_back_exception_safety_helpers{
1410 //TODO: remove code duplication with emplace_helpers::wrapper_type
1411 struct throwing_foo:Foo{
1412 int value1;
1413 int value2;
1414 explicit throwing_foo(int v1, int v2) : value1 (v1), value2(v2) { }
1415 };
1416
1417 template< typename foo_t = throwing_foo>
1418 struct fixture{
1419 typedef tbb::concurrent_vector<foo_t, debug_allocator<foo_t> > vector_t;
1420 vector_t v;
1421
1422 void test( void(*p_test)(vector_t&), const char * test_name){
1423 track_foo_count<__LINE__> verify_no_foo_leaked_during_exception(test_name);
1424 ASSERT_IN_TEST(v.empty(),"incorrect test setup?", test_name );
1425 ASSERT_THROWS_IN_TEST(p_test(v), Foo_exception ,"", test_name);
1426 ASSERT_IN_TEST(is_state<Foo::ZeroInitialized>(v[0]),"incorrectly filled item during exception in emplace_back?", test_name);
1427 }
1428 };
1429}
1430
1431#if __TBB_CPP11_RVALUE_REF_PRESENT
1432void TestPushBackMoveExceptionSafety(){
1433 typedef push_back_exception_safety_helpers::fixture<Foo> fixture_t;
1434 fixture_t t;
1435
1436 limit_foo_count_in_scope foo_limit(FooCount + 1);
1437
1438 struct test{
1439 static void test_move_push_back(fixture_t::vector_t& v){
1440 Foo f;
1441 v.push_back(std::move(f));
1442 }
1443 };
1444 t.test(&test::test_move_push_back, "TestPushBackMoveExceptionSafety");
1445}
1446
1447#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1448void TestEmplaceBackExceptionSafety(){
1449 typedef push_back_exception_safety_helpers::fixture<> fixture_t;
1450 fixture_t t;
1451
1452 Foo dummy; //make FooCount non zero;
1453 Harness::suppress_unused_warning(dummy);
1454 limit_foo_count_in_scope foo_limit(FooCount);
1455
1456 struct test{
1457 static void test_emplace(fixture_t::vector_t& v){
1458 v.emplace_back(1,2);
1459 }
1460 };
1461 t.test(&test::test_emplace, "TestEmplaceBackExceptionSafety");
1462}
1463#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1464#endif //__TBB_CPP11_RVALUE_REF_PRESENT
1465
1466#endif /* TBB_USE_EXCEPTIONS */
1467
1468//------------------------------------------------------------------------
1469// Test support for SIMD instructions
1470//------------------------------------------------------------------------
1471#include "harness_m128.h"
1472
1473#if HAVE_m128 || HAVE_m256
1474
1475template<typename ClassWithVectorType>
1476void TestVectorTypes() {
1477 tbb::concurrent_vector<ClassWithVectorType> v;
1478 for( int i=0; i<100; ++i ) {
1479 // VC8 does not properly align a temporary value; to work around, use explicit variable
1480 ClassWithVectorType foo(i);
1481 v.push_back(foo);
1482 for( int j=0; j<i; ++j ) {
1483 ClassWithVectorType bar(j);
1484 ASSERT( v[j]==bar, NULL );
1485 }
1486 }
1487}
1488#endif /* HAVE_m128 | HAVE_m256 */
1489
1490//------------------------------------------------------------------------
1491
1492namespace v3_backward_compatibility{
1493 namespace segment_t_layout_helpers{
1494 //this is previous definition of according inner class of concurrent_vector_base_v3
1495 struct segment_t_v3 {
1496 void* array;
1497 };
1498 //helper class to access protected members of concurrent_vector_base
1499 struct access_vector_fields :tbb::internal::concurrent_vector_base_v3 {
1500 using tbb::internal::concurrent_vector_base_v3::segment_t;
1501 using tbb::internal::concurrent_vector_base_v3::segment_index_t;
1502 using tbb::internal::concurrent_vector_base_v3::pointers_per_long_table;
1503 using tbb::internal::concurrent_vector_base_v3::internal_segments_table;
1504 };
1505 //this is previous definition of according inner class of concurrent_vector_base_v3
1506 struct internal_segments_table_v3 {
1507 access_vector_fields::segment_index_t first_block;
1508 segment_t_v3 table[access_vector_fields::pointers_per_long_table];
1509 };
1510
1511 template <typename checked_type>
1512 struct alignment_check_helper{
1513 char dummy;
1514 checked_type checked;
1515 };
1516 }
1517 void TestSegmentTLayout(){
1518 using namespace segment_t_layout_helpers;
1519 typedef alignment_check_helper<segment_t_v3> structure_with_old_segment_type;
1520 typedef alignment_check_helper<access_vector_fields::segment_t> structure_with_new_segment_type;
1521
1522 ASSERT((sizeof(structure_with_old_segment_type)==sizeof(structure_with_new_segment_type))
1523 ,"layout of new segment_t and old one differ?");
1524 }
1525
1526 void TestInternalSegmentsTableLayout(){
1527 using namespace segment_t_layout_helpers;
1528 typedef alignment_check_helper<internal_segments_table_v3> structure_with_old_segment_table_type;
1529 typedef alignment_check_helper<access_vector_fields::internal_segments_table> structure_with_new_segment_table_type;
1530
1531 ASSERT((sizeof(structure_with_old_segment_table_type)==sizeof(structure_with_new_segment_table_type))
1532 ,"layout of new internal_segments_table and old one differ?");
1533 }
1534}
1535void TestV3BackwardCompatibility(){
1536 using namespace v3_backward_compatibility;
1537 TestSegmentTLayout();
1538 TestInternalSegmentsTableLayout();
1539}
1540
1541#include "harness_defs.h"
1542
1543#include <vector>
1544#include <numeric>
1545#include <functional>
1546
1547// The helper to run a test only when a default construction is present.
1548template <bool default_construction_present> struct do_default_construction_test {
1549 template<typename FuncType> void operator() ( FuncType func ) const { func(); }
1550};
1551template <> struct do_default_construction_test<false> {
1552 template<typename FuncType> void operator()( FuncType ) const {}
1553};
1554
1555template <typename Type, typename Allocator>
1556class test_grow_by_and_resize : NoAssign {
1557 tbb::concurrent_vector<Type, Allocator> &my_c;
1558public:
1559 test_grow_by_and_resize( tbb::concurrent_vector<Type, Allocator> &c ) : my_c(c) {}
1560 void operator()() const {
1561 const typename tbb::concurrent_vector<Type, Allocator>::size_type sz = my_c.size();
1562 my_c.grow_by( 5 );
1563 ASSERT( my_c.size() == sz + 5, NULL );
1564 my_c.resize( sz );
1565 ASSERT( my_c.size() == sz, NULL );
1566 }
1567};
1568
1569template <typename Type, typename Allocator>
1570void CompareVectors( const tbb::concurrent_vector<Type, Allocator> &c1, const tbb::concurrent_vector<Type, Allocator> &c2 ) {
1571 ASSERT( !(c1 == c2) && c1 != c2, NULL );
1572 ASSERT( c1 <= c2 && c1 < c2 && c2 >= c1 && c2 > c1, NULL );
1573}
1574
1575#if __TBB_CPP11_SMART_POINTERS_PRESENT
1576template <typename Type, typename Allocator>
1577void CompareVectors( const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> &, const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> & ) {
1578 /* do nothing for std::weak_ptr */
1579}
1580#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
1581
1582template <bool default_construction_present, typename Type, typename Allocator>
1583void Examine( tbb::concurrent_vector<Type, Allocator> c, const std::vector<Type> &vec ) {
1584 typedef tbb::concurrent_vector<Type, Allocator> vector_t;
1585 typedef typename vector_t::size_type size_type_t;
1586
1587 ASSERT( c.size() == vec.size(), NULL );
1588 for ( size_type_t i=0; i<c.size(); ++i ) ASSERT( Harness::IsEqual()(c[i], vec[i]), NULL );
1589 do_default_construction_test<default_construction_present>()(test_grow_by_and_resize<Type,Allocator>(c));
1590 c.grow_by( size_type_t(5), c[0] );
1591 c.grow_to_at_least( c.size()+5, c.at(0) );
1592 vector_t c2;
1593 c2.reserve( 5 );
1594 std::copy( c.begin(), c.begin() + 5, std::back_inserter( c2 ) );
1595
1596 c.grow_by( c2.begin(), c2.end() );
1597 const vector_t& cvcr = c;
1598 ASSERT( Harness::IsEqual()(cvcr.front(), *(c2.rend()-1)), NULL );
1599 ASSERT( Harness::IsEqual()(cvcr.back(), *c2.rbegin()), NULL);
1600 ASSERT( Harness::IsEqual()(*c.cbegin(), *(c.crend()-1)), NULL );
1601 ASSERT( Harness::IsEqual()(*(c.cend()-1), *c.crbegin()), NULL );
1602 c.swap( c2 );
1603 ASSERT( c.size() == 5, NULL );
1604 CompareVectors( c, c2 );
1605 c.swap( c2 );
1606 c2.clear();
1607 ASSERT( c2.size() == 0, NULL );
1608 c2.shrink_to_fit();
1609 Allocator a = c.get_allocator();
1610 a.deallocate( a.allocate(1), 1 );
1611}
1612
1613template <typename Type>
1614class test_default_construction : NoAssign {
1615 const std::vector<Type> &my_vec;
1616public:
1617 test_default_construction( const std::vector<Type> &vec ) : my_vec(vec) {}
1618 void operator()() const {
1619 // Construction with initial size specified by argument n.
1620 tbb::concurrent_vector<Type> c7( my_vec.size() );
1621 std::copy( my_vec.begin(), my_vec.end(), c7.begin() );
1622 Examine</*default_construction_present = */true>( c7, my_vec );
1623 tbb::concurrent_vector< Type, debug_allocator<Type> > c8( my_vec.size() );
1624 std::copy( c7.begin(), c7.end(), c8.begin() );
1625 Examine</*default_construction_present = */true>( c8, my_vec );
1626 }
1627};
1628
1629template <bool default_construction_present, typename Type>
1630void TypeTester( const std::vector<Type> &vec ) {
1631 __TBB_ASSERT( vec.size() >= 5, "Array should have at least 5 elements" );
1632 // Construct empty vector.
1633 tbb::concurrent_vector<Type> c1;
1634 std::copy( vec.begin(), vec.end(), std::back_inserter(c1) );
1635 Examine<default_construction_present>( c1, vec );
1636#if __TBB_INITIALIZER_LISTS_PRESENT
1637 // Constructor from initializer_list.
1638 tbb::concurrent_vector<Type> c2({vec[0],vec[1],vec[2]});
1639 std::copy( vec.begin()+3, vec.end(), std::back_inserter(c2) );
1640 Examine<default_construction_present>( c2, vec );
1641#endif
1642 // Copying constructor.
1643 tbb::concurrent_vector<Type> c3(c1);
1644 Examine<default_construction_present>( c3, vec );
1645 // Construct with non-default allocator
1646 tbb::concurrent_vector< Type, debug_allocator<Type> > c4;
1647 std::copy( vec.begin(), vec.end(), std::back_inserter(c4) );
1648 Examine<default_construction_present>( c4, vec );
1649 // Copying constructor for vector with different allocator type.
1650 tbb::concurrent_vector<Type> c5(c4);
1651 Examine<default_construction_present>( c5, vec );
1652 tbb::concurrent_vector< Type, debug_allocator<Type> > c6(c3);
1653 Examine<default_construction_present>( c6, vec );
1654 // Construction with initial size specified by argument n.
1655 do_default_construction_test<default_construction_present>()(test_default_construction<Type>(vec));
1656 // Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance.
1657 debug_allocator<Type> allocator;
1658 tbb::concurrent_vector< Type, debug_allocator<Type> > c9(vec.size(), vec[1], allocator);
1659 Examine<default_construction_present>( c9, std::vector<Type>(vec.size(), vec[1]) );
1660 // Construction with copying iteration range and given allocator instance.
1661 tbb::concurrent_vector< Type, debug_allocator<Type> > c10(c1.begin(), c1.end(), allocator);
1662 Examine<default_construction_present>( c10, vec );
1663 tbb::concurrent_vector<Type> c11(vec.begin(), vec.end());
1664 Examine<default_construction_present>( c11, vec );
1665}
1666
1667void TestTypes() {
1668 const int NUMBER = 100;
1669
1670 std::vector<int> intArr;
1671 for ( int i=0; i<NUMBER; ++i ) intArr.push_back(i);
1672 TypeTester</*default_construction_present = */true>( intArr );
1673
1674#if __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN
1675 std::vector< std::reference_wrapper<int> > refArr;
1676 // The constructor of std::reference_wrapper<T> from T& is explicit in some versions of libstdc++.
1677 for ( int i=0; i<NUMBER; ++i ) refArr.push_back( std::reference_wrapper<int>(intArr[i]) );
1678 TypeTester</*default_construction_present = */false>( refArr );
1679#else
1680 REPORT( "Known issue: C++11 reference wrapper tests are skipped.\n" );
1681#endif /* __TBB_CPP11_REFERENCE_WRAPPER_PRESENT && !__TBB_REFERENCE_WRAPPER_COMPILATION_BROKEN */
1682
1683 std::vector< tbb::atomic<int> > tbbIntArr( NUMBER );
1684 for ( int i=0; i<NUMBER; ++i ) tbbIntArr[i] = i;
1685 TypeTester</*default_construction_present = */true>( tbbIntArr );
1686
1687#if __TBB_CPP11_SMART_POINTERS_PRESENT
1688 std::vector< std::shared_ptr<int> > shrPtrArr;
1689 for ( int i=0; i<NUMBER; ++i ) shrPtrArr.push_back( std::make_shared<int>(i) );
1690 TypeTester</*default_construction_present = */true>( shrPtrArr );
1691
1692 std::vector< std::weak_ptr<int> > wkPtrArr;
1693 std::copy( shrPtrArr.begin(), shrPtrArr.end(), std::back_inserter(wkPtrArr) );
1694 TypeTester</*default_construction_present = */true>( wkPtrArr );
1695#else
1696 REPORT( "Known issue: C++11 smart pointer tests are skipped.\n" );
1697#endif /* __TBB_CPP11_SMART_POINTERS_PRESENT */
1698}
1699
1700#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1701template <template <typename...> typename TVector>
1702void TestDeductionGuides() {
1703 using ComplexType = const std::string*;
1704 std::vector<ComplexType> v;
1705 std::string s = "s";
1706 auto l = {ComplexType(&s), ComplexType(&s)};
1707
1708 // check TVector(InputIterator, InputIterator)
1709 TVector v1(v.begin(), v.end());
1710 static_assert(std::is_same<decltype(v1), TVector<ComplexType>>::value);
1711
1712 // check TVector(InputIterator, InputIterator, Alocator)
1713 TVector v2(v.begin(), v.end(), std::allocator<ComplexType>());
1714 static_assert(std::is_same<decltype(v2),
1715 TVector<ComplexType, std::allocator<ComplexType>>>::value);
1716
1717 // check TVector(std::initializer_list<T>)
1718 TVector v3(l);
1719 static_assert(std::is_same<decltype(v3),
1720 TVector<ComplexType>>::value);
1721
1722 // check TVector(std::initializer_list, Alocator)
1723 TVector v4(l, std::allocator<ComplexType>());
1724 static_assert(std::is_same<decltype(v4), TVector<ComplexType, std::allocator<ComplexType>>>::value);
1725
1726 // check TVector(TVector&)
1727 TVector v5(v1);
1728 static_assert(std::is_same<decltype(v5), TVector<ComplexType>>::value);
1729
1730 // check TVector(TVector&, Allocator)
1731 TVector v6(v5, std::allocator<ComplexType>());
1732 static_assert(std::is_same<decltype(v6), TVector<ComplexType, std::allocator<ComplexType>>>::value);
1733
1734 // check TVector(TVector&&)
1735 TVector v7(std::move(v1));
1736 static_assert(std::is_same<decltype(v7), decltype(v1)>::value);
1737
1738 // check TVector(TVector&&, Allocator)
1739 TVector v8(std::move(v5), std::allocator<ComplexType>());
1740 static_assert(std::is_same<decltype(v8), TVector<ComplexType, std::allocator<ComplexType>>>::value);
1741
1742 // check TVector(TVector&, Allocator)
1743 TVector v9(v1, std::allocator<ComplexType>());
1744 static_assert(std::is_same<decltype(v9), TVector<ComplexType, std::allocator<ComplexType>>>::value);
1745
1746}
1747#endif
1748
1749// Currently testing compilation issues with polymorphic allocator, but concurrent_vector does not
1750// provide full support yet.
1751// TODO: extend test with full checking polymorphic_allocator with concurrent_vector
1752void TestPMRSupport() {
1753 typedef pmr_stateful_allocator<int> AType;
1754 typedef tbb::concurrent_vector<int, AType> VType;
1755 const int VEC_SIZE = 1000;
1756
1757 AType original_alloc;
1758 VType c(original_alloc);
1759
1760 // General compilation test
1761 for( int i = 0; i < VEC_SIZE; ++i ) {
1762 c.push_back(i*i);
1763 }
1764 VType::const_iterator p = c.begin();
1765 for( int i = 0; i < VEC_SIZE; ++i ) {
1766 ASSERT( *p == i*i, NULL ); ++p;
1767 }
1768
1769 // Check that swap is allocator aware
1770 AType swap_alloc;
1771 VType swap_container(swap_alloc); swap_container.swap(c);
1772 ASSERT(c.get_allocator() != swap_container.get_allocator(), "Allocator was swapped, it shouldn't");
1773
1774#if __TBB_CPP11_RVALUE_REF_PRESENT
1775 // Move assignment operator deleted, container is allocator aware
1776 AType move_alloc;
1777 VType move_container(move_alloc);
1778 move_container = std::move(c);
1779 ASSERT(c.get_allocator() != move_container.get_allocator(), "Allocator was moved, it shouldn't");
1780#endif
1781}
1782
1783int TestMain () {
1784 if( MinThread<1 ) {
1785 REPORT("ERROR: MinThread=%d, but must be at least 1\n",MinThread); MinThread = 1;
1786 }
1787 TestFoo();
1788 TestV3BackwardCompatibility();
1789 TestIteratorTraits<tbb::concurrent_vector<Foo>::iterator,Foo>();
1790 TestIteratorTraits<tbb::concurrent_vector<Foo>::const_iterator,const Foo>();
1791 TestArrayLength();
1792 TestAllOf();
1793#if __TBB_INITIALIZER_LISTS_PRESENT
1794 TestInitList();
1795#else
1796 REPORT("Known issue: initializer list tests are skipped.\n");
1797#endif
1798 TestSequentialFor<FooWithAssign> ();
1799 TestResizeAndCopy();
1800 TestAssign();
1801#if __TBB_CPP11_RVALUE_REF_PRESENT
1802 TestMoveConstructor<c_vector_type>();
1803 TestMoveAssignOperator<c_vector_type>();
1804 TestConstructorWithMoveIterators<c_vector_type>();
1805 TestAssignWithMoveIterators<c_vector_type>();
1806 TestSerialGrowByWithMoveIterators();
1807#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1808 TestSerialMoveInShrinkToFit();
1809#endif // __TBB_MOVE_IF_NOEXCEPT_PRESENT
1810#else
1811 REPORT("Known issue: tests for vector move constructor/assignment operator are skipped.\n");
1812#endif
1813 TestGrowToAtLeastWithSourceParameter<tbb::concurrent_vector<int> >(12345);
1814 TestSerialGrowByRange(false);
1815 TestSerialGrowByRange(true);
1816#if __TBB_CPP11_RVALUE_REF_PRESENT
1817 TestPushBackMoveOnlyContainee();
1818#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1819 TestEmplaceBack();
1820#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1821#endif //__TBB_CPP11_RVALUE_REF_PRESENT
1822#if HAVE_m128
1823 TestVectorTypes<ClassWithSSE>();
1824#endif
1825#if HAVE_m256
1826 if (have_AVX()) TestVectorTypes<ClassWithAVX>();
1827#endif
1828 TestCapacity();
1829 ASSERT( !FooCount, NULL );
1830 for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
1831 tbb::task_scheduler_init init( nthread );
1832 TestParallelFor( nthread );
1833 TestConcurrentGrowToAtLeast();
1834 TestConcurrentGrowBy( nthread );
1835 }
1836 ASSERT( !FooCount, NULL );
1837 TestComparison();
1838 TestFindPrimes();
1839 TestSort();
1840#if __TBB_RANGE_BASED_FOR_PRESENT
1841 TestRangeBasedFor();
1842#endif //if __TBB_RANGE_BASED_FOR_PRESENT
1843#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
1844 REPORT("Known issue: exception safety test is skipped.\n");
1845#elif TBB_USE_EXCEPTIONS
1846 TestExceptions();
1847 TestExceptionSafetyGuaranteesForAssignOperator();
1848#if __TBB_CPP11_RVALUE_REF_PRESENT
1849 TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorMemoryFailure<c_vector_type>();
1850 TestExceptionSafetyGuaranteesMoveConstructorWithUnEqualAllocatorExceptionInElementCtor<c_vector_type>();
1851 TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorMemoryFailure();
1852 TestExceptionSafetyGuaranteesForMoveAssignOperatorWithUnEqualAllocatorExceptionInElementCtor();
1853 TestPushBackMoveExceptionSafety();
1854#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1855 TestEmplaceBackExceptionSafety();
1856#endif /*__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT */
1857#else
1858 REPORT("Known issue: exception safety tests for move constructor/assignment operator , grow_by are skipped.\n");
1859#endif /*__TBB_CPP11_RVALUE_REF_PRESENT */
1860#endif /* TBB_USE_EXCEPTIONS */
1861 TestTypes();
1862#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1863 TestDeductionGuides<tbb::concurrent_vector>();
1864#endif
1865 TestPMRSupport();
1866
1867 ASSERT( !FooCount, NULL );
1868 REMARK("sizeof(concurrent_vector<int>) == %d\n", (int)sizeof(tbb::concurrent_vector<int>));
1869 return Harness::Done;
1870}
1871
1872#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1873 #pragma warning (pop)
1874#endif // warning 4800 is back
1875