1/*
2 Copyright (c) 2005-2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#include "tbb/parallel_do.h"
18#include "tbb/task_scheduler_init.h"
19#include "tbb/atomic.h"
20#include "harness.h"
21#include "harness_cpu.h"
22
23#if defined(_MSC_VER) && defined(_Wp64)
24 // Workaround for overzealous compiler warnings in /Wp64 mode
25 #pragma warning (disable: 4267)
26#endif /* _MSC_VER && _Wp64 */
27
//! Number of depth values in g_depths; also sizes the per-test work-item arrays.
#define N_DEPTHS 20

//! Live count of value_t instances; checked to be zero at the end of TestMain to detect leaks.
static tbb::atomic<int> g_values_counter;
31
32class value_t {
33 size_t x;
34 value_t& operator= ( const value_t& );
35public:
36 value_t ( size_t xx ) : x(xx) { ++g_values_counter; }
37 value_t ( const value_t& v ) : x(v.x) { ++g_values_counter; }
38#if __TBB_CPP11_RVALUE_REF_PRESENT
39 value_t ( value_t&& v ) : x(v.x) { ++g_values_counter; }
40#endif
41 ~value_t () { --g_values_counter; }
42 size_t value() const volatile { return x; }
43};
44
45#include "harness_iterator.h"
46
// Total number of tasks the bodies are expected to observe in the current run (set by Run).
static size_t g_tasks_expected = 0;
// Number of tasks actually observed; compared against g_tasks_expected after each parallel_do.
static tbb::atomic<size_t> g_tasks_observed;
49
//! Total number of tasks generated by processing one work item of the given depth.
/** Computes the recurrence f(0) = 1, f(d) = d * f(d-1) + 1 iteratively. **/
size_t FindNumOfTasks ( size_t max_depth ) {
    size_t num_tasks = 1;
    for ( size_t d = 1; d <= max_depth; ++d )
        num_tasks = d * num_tasks + 1;
    return num_tasks;
}
55
56//! Simplest form of the parallel_do functor object.
57class FakeTaskGeneratorBody {
58public:
59 //! The simplest form of the function call operator
60 /** It does not allow adding new tasks during its execution. **/
61 void operator() ( value_t depth ) const {
62 g_tasks_observed += FindNumOfTasks(depth.value());
63 }
64};
65
66/** Work item is passed by reference here. **/
67class FakeTaskGeneratorBody_RefVersion {
68public:
69 void operator() ( value_t& depth ) const {
70 g_tasks_observed += FindNumOfTasks(depth.value());
71 }
72};
73
74/** Work item is passed by reference to const here. **/
75class FakeTaskGeneratorBody_ConstRefVersion {
76public:
77 void operator() ( const value_t& depth ) const {
78 g_tasks_observed += FindNumOfTasks(depth.value());
79 }
80};
81
82/** Work item is passed by reference to volatile here. **/
83class FakeTaskGeneratorBody_VolatileRefVersion {
84public:
85 void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& ) const {
86 g_tasks_observed += FindNumOfTasks(depth.value());
87 }
88};
89
#if __TBB_CPP11_RVALUE_REF_PRESENT
/** Work item is passed by rvalue reference here. **/
class FakeTaskGeneratorBody_RvalueRefVersion {
public:
    void operator() ( value_t&& depth ) const {
        const size_t subtree_tasks = FindNumOfTasks( depth.value() );
        g_tasks_observed += subtree_tasks;
    }
};
#endif
99
100void do_work ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) {
101 ++g_tasks_observed;
102 value_t new_value(depth.value()-1);
103 for( size_t i = 0; i < depth.value(); ++i) {
104 if (i%2) feeder.add( new_value ); // pass lvalue
105 else feeder.add( value_t(depth.value()-1) ); // pass rvalue
106 }
107}
108
//! Standard form of the parallel_do functor object.
/** Allows adding new work items on the fly. **/
class TaskGeneratorBody
{
public:
    //! This form of the function call operator can be used when the body needs to add more work during the processing
    void operator() ( value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work(depth, feeder);
    }
private:
    // Assert that parallel_do does not ever access body constructors:
    // keeping the default and copy constructors private means any such
    // access inside the algorithm would fail to compile.
    TaskGeneratorBody () {}
    TaskGeneratorBody ( const TaskGeneratorBody& );
    // These functions need access to the default constructor
    template<class Body, class Iterator> friend void TestBody( size_t );
    template<class Body, class Iterator> friend void TestBody_MoveOnly( size_t );
};
126
127/** Work item is passed by reference here. **/
128class TaskGeneratorBody_RefVersion
129{
130public:
131 void operator() ( value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
132 do_work(depth, feeder);
133 }
134};
135
136/** Work item is passed as const here. Compilers must ignore the const qualifier. **/
137class TaskGeneratorBody_ConstVersion
138{
139public:
140 void operator() ( const value_t depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
141 do_work(depth, feeder);
142 }
143};
144
145/** Work item is passed by reference to const here. **/
146class TaskGeneratorBody_ConstRefVersion
147{
148public:
149 void operator() ( const value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
150 do_work(depth, feeder);
151 }
152};
153
154/** Work item is passed by reference to volatile here. **/
155class TaskGeneratorBody_VolatileRefVersion
156{
157public:
158 void operator() ( volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
159 do_work(const_cast<value_t&>(depth), feeder);
160 }
161};
162
163/** Work item is passed by reference to const volatile here. **/
164class TaskGeneratorBody_ConstVolatileRefVersion
165{
166public:
167 void operator() ( const volatile value_t& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
168 do_work(const_cast<value_t&>(depth), feeder);
169 }
170};
171
#if __TBB_CPP11_RVALUE_REF_PRESENT
/** Standard body taking the work item by rvalue reference. **/
class TaskGeneratorBody_RvalueRefVersion {
public:
    void operator() ( value_t&& depth, tbb::parallel_do_feeder<value_t>& feeder ) const {
        do_work( depth, feeder );
    }
};
#endif
182
//! Depths of the initial work items; Run derives g_tasks_expected from a prefix of this array.
static value_t g_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
184
185#if __TBB_CPP11_RVALUE_REF_PRESENT
186template<class Body, class Iterator>
187void TestBody_MoveIter ( const Body& body, Iterator begin, Iterator end ) {
188 typedef std::move_iterator<Iterator> MoveIterator;
189 MoveIterator mbegin(begin);
190 MoveIterator mend(end);
191 g_tasks_observed = 0;
192 tbb::parallel_do(mbegin, mend, body);
193 ASSERT (g_tasks_observed == g_tasks_expected, NULL);
194}
195
//! Build a fresh array of work items and process it exclusively through move iterators.
/** Used for bodies whose work-item type is not copyable. **/
template<class Body, class Iterator>
void TestBody_MoveOnly ( size_t depth ) {
    typedef typename std::iterator_traits<Iterator>::value_type value_type;
    value_type a_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
    TestBody_MoveIter( Body(), Iterator(a_depths), Iterator(a_depths + depth));
}
202#endif
203
204template<class Body, class Iterator>
205void TestBody ( size_t depth ) {
206 typedef typename std::iterator_traits<Iterator>::value_type value_type;
207 value_type a_depths[N_DEPTHS] = {0, 1, 2, 3, 4, 0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2};
208 Body body;
209 Iterator begin(a_depths);
210 Iterator end(a_depths + depth);
211 g_tasks_observed = 0;
212 tbb::parallel_do(begin, end, body);
213 ASSERT (g_tasks_observed == g_tasks_expected, NULL);
214#if __TBB_CPP11_RVALUE_REF_PRESENT
215 TestBody_MoveIter( body, Iterator(a_depths), Iterator(a_depths + depth) );
216#endif
217}
218
//! Body/iterator combinations that are valid for any iterator category.
template<class Iterator>
void TestIterator_Common ( size_t depth ) {
    TestBody<FakeTaskGeneratorBody, Iterator> (depth);
    TestBody<FakeTaskGeneratorBody_ConstRefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody, Iterator> (depth);
    TestBody<TaskGeneratorBody_ConstVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_ConstRefVersion, Iterator> (depth);
}
227
//! Combinations valid for iterators yielding const (but possibly volatile) items.
template<class Iterator>
void TestIterator_Const ( size_t depth ) {
    TestIterator_Common<Iterator>(depth);
    TestBody<TaskGeneratorBody_ConstVolatileRefVersion, Iterator> (depth);
}
233
//! Combinations additionally requiring non-const (and, when available, movable) access to items.
template<class Iterator>
void TestIterator_Modifiable ( size_t depth ) {
    TestIterator_Const<Iterator>(depth);
    TestBody<FakeTaskGeneratorBody_RefVersion, Iterator> (depth);
    TestBody<FakeTaskGeneratorBody_VolatileRefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_RefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_VolatileRefVersion, Iterator> (depth);
#if __TBB_CPP11_RVALUE_REF_PRESENT
    TestBody_MoveOnly<FakeTaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
    TestBody_MoveOnly<TaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
#endif
}
246
//! Combinations for iterators whose items can be moved from but not modified in place.
template<class Iterator>
void TestIterator_Movable ( size_t depth ) {
    TestIterator_Common<Iterator>(depth);
#if __TBB_CPP11_RVALUE_REF_PRESENT
    TestBody<FakeTaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
    TestBody<TaskGeneratorBody_RvalueRefVersion, Iterator> (depth);
#endif
}
255
//! Exercise every body/iterator combination for each prefix length of g_depths.
void Run( int /*nthread*/ ) {
    for( size_t depth = 0; depth <= N_DEPTHS; ++depth ) {
        // Precompute the total task count the bodies are expected to observe for this prefix.
        g_tasks_expected = 0;
        for ( size_t i=0; i < depth; ++i )
            g_tasks_expected += FindNumOfTasks( g_depths[i].value() );
        // Test for iterators over values convertible to work item type
        TestIterator_Movable<size_t*>(depth);
        // Test for random access iterators
        TestIterator_Modifiable<value_t*>(depth);
        // Test for input iterators
        TestIterator_Modifiable<Harness::InputIterator<value_t> >(depth);
        // Test for forward iterators
        TestIterator_Modifiable<Harness::ForwardIterator<value_t> >(depth);
        // Test for const random access iterators
        TestIterator_Const<Harness::ConstRandomIterator<value_t> >(depth);
    }
}
273
// Number of elements processed in each range_do_test pass.
const size_t elements = 10000;
// Expected accumulated sum when every element is zero.
const size_t init_sum = 0;
// Counts how many elements the set_to functor has touched in the current pass.
tbb::atomic<size_t> element_counter;
277
278template<size_t K>
279struct set_to {
280 void operator()(size_t& x) const {
281 x = K;
282 ++element_counter;
283 }
284};
285
286#include "test_range_based_for.h"
287#include <functional>
288#include <deque>
289
290void range_do_test() {
291 using namespace range_based_for_support_tests;
292 std::deque<size_t> v(elements, 0);
293
294 // iterator, const and non-const range check
295 element_counter = 0;
296 tbb::parallel_do(v.begin(), v.end(), set_to<1>());
297 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
298 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
299
300 element_counter = 0;
301 tbb::parallel_do(v, set_to<0>());
302 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
303 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros");
304
305 element_counter = 0;
306 tbb::parallel_do(tbb::blocked_range<std::deque<size_t>::iterator>(v.begin(), v.end()), set_to<1>());
307 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
308 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
309
310 // same as above with context group
311 element_counter = 0;
312 tbb::task_group_context context;
313 tbb::parallel_do(v.begin(), v.end(), set_to<0>(), context);
314 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
315 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all ones");
316
317 element_counter = 0;
318 tbb::parallel_do(v, set_to<1>(), context);
319 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
320 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == v.size(), "elements of v not all ones");
321
322 element_counter = 0;
323 tbb::parallel_do(tbb::blocked_range<std::deque<size_t>::iterator>(v.begin(), v.end()), set_to<0>(), context);
324 ASSERT(element_counter == v.size() && element_counter == elements, "not all elements were set");
325 ASSERT(range_based_for_accumulate(v, std::plus<size_t>(), init_sum) == init_sum, "elements of v not all zeros");
326}
327
328#if __TBB_CPP11_RVALUE_REF_PRESENT
329namespace TestMoveSem {
330 struct MovePreferable : Movable {
331 MovePreferable() : Movable(), addtofeed(true) {}
332 MovePreferable(bool addtofeed_) : Movable(), addtofeed(addtofeed_) {}
333 MovePreferable(MovePreferable&& other) : Movable(std::move(other)), addtofeed(other.addtofeed) {};
334 // base class is explicitly initialized in the copy ctor to avoid -Wextra warnings
335 MovePreferable(const MovePreferable& other) : Movable(other) { REPORT("Error: copy ctor preferred.\n"); };
336 MovePreferable& operator=(const MovePreferable&) { REPORT("Error: copy assign operator preferred.\n"); return *this; }
337 bool addtofeed;
338 };
339 struct MoveOnly : MovePreferable, NoCopy {
340 MoveOnly() : MovePreferable() {}
341 MoveOnly(bool addtofeed_) : MovePreferable(addtofeed_) {}
342 MoveOnly(MoveOnly&& other) : MovePreferable(std::move(other)) {};
343 };
344}
345
346template<typename T>
347void RecordAndAdd(const T& in, tbb::parallel_do_feeder<T>& feeder) {
348 ASSERT(in.alive, "Got dead object in body");
349 size_t i = ++g_tasks_observed;
350 if (in.addtofeed) {
351 if (i%2) feeder.add(T(false));
352 else {
353 T a(false);
354 feeder.add(std::move(a));
355 }
356 }
357}
// Body that takes an item by rvalue reference.
template<typename T>
struct TestMoveIteratorBody {
    void operator() (T&& in, tbb::parallel_do_feeder<T>& feeder) const { RecordAndAdd(in, feeder); }
};
// Body that takes an item by value (the value is move-constructed from the iterator).
template<typename T>
struct TestMoveIteratorBodyByValue {
    void operator() (T in, tbb::parallel_do_feeder<T>& feeder) const { RecordAndAdd(in, feeder); }
};
368
369template<typename Iterator, typename Body>
370void TestMoveIterator() {
371 typedef typename std::iterator_traits<Iterator>::value_type value_type;
372
373 Body body;
374 const size_t size = 65;
375 g_tasks_observed = 0;
376 value_type a[size];
377 tbb::parallel_do( std::make_move_iterator(Iterator(a)), std::make_move_iterator(Iterator(a+size)), body );
378 ASSERT(size * 2 == g_tasks_observed, NULL);
379}
380
//! Run the move-iterator tests for T over all iterator categories,
//! with bodies taking items both by rvalue reference and by value.
template<typename T>
void DoTestMoveSemantics() {
    TestMoveIterator< Harness::InputIterator<T>, TestMoveIteratorBody<T> >();
    TestMoveIterator< Harness::ForwardIterator<T>, TestMoveIteratorBody<T> >();
    TestMoveIterator< Harness::RandomIterator<T>, TestMoveIteratorBody<T> >();

    TestMoveIterator< Harness::InputIterator<T>, TestMoveIteratorBodyByValue<T> >();
    TestMoveIterator< Harness::ForwardIterator<T>, TestMoveIteratorBodyByValue<T> >();
    TestMoveIterator< Harness::RandomIterator<T>, TestMoveIteratorBodyByValue<T> >();
}
391
//! Run move-semantics tests for a move-preferable type and, where the compiler
//! supports the required trait, for a move-only type.
void TestMoveSemantics() {
    DoTestMoveSemantics<TestMoveSem::MovePreferable>();
#if __TBB_CPP11_IS_COPY_CONSTRUCTIBLE_PRESENT && !__TBB_IS_COPY_CONSTRUCTIBLE_BROKEN
    // parallel_do uses is_copy_constructible to support non-copyable types
    DoTestMoveSemantics<TestMoveSem::MoveOnly>();
#endif
}
399#else /* __TBB_CPP11_RVALUE_REF_PRESENT */
//! Stub for toolchains without rvalue references: report that move tests are skipped.
void TestMoveSemantics() {
    REPORT("Known issue: move support tests are skipped.\n");
}
403#endif
404
//! Harness entry point: run all tests for every thread count in [MinThread, MaxThread].
int TestMain () {
    if( MinThread<1 ) {
        REPORT("number of threads must be positive\n");
        exit(1);
    }
    g_values_counter = 0;
    for( int p=MinThread; p<=MaxThread; ++p ) {
        // Scoped scheduler: workers are shut down at the end of each iteration.
        tbb::task_scheduler_init init( p );
        Run(p);
        range_do_test();
        // Test that all workers sleep when no work
        TestCPUUserTime(p);
    }
    // This check must be performed after the scheduler terminated because only in this
    // case there is a guarantee that the workers already destroyed their last tasks.
    ASSERT( g_values_counter == 0, "Value objects were leaked" );

    TestMoveSemantics();
    return Harness::Done;
}
425