/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

// Before including pipeline.h, define the variable that counts heap-allocated
// filter_node objects and make it visible to the header.
int filter_node_count = 0;
#define __TBB_TEST_FILTER_NODE_COUNT filter_node_count
#include "tbb/pipeline.h"

#include "tbb/atomic.h"
#include "harness.h"
#include <string.h>

#include "tbb/tbb_allocator.h"
#include "tbb/spin_mutex.h"
const unsigned n_tokens = 8;
// The middle filter may momentarily hold two buffers per token in flight (the incoming buffer plus a
// freshly allocated outgoing one), so allocate two buffers for every token. Unlikely, but possible.
const unsigned n_buffers = 2*n_tokens;
const int max_counter = 16;
static tbb::atomic<int> output_counter;
static tbb::atomic<int> input_counter;
static tbb::atomic<int> non_pointer_specialized_calls;
static tbb::atomic<int> pointer_specialized_calls;
static tbb::atomic<int> first_pointer_specialized_calls;
static tbb::atomic<int> second_pointer_specialized_calls;
static tbb::spin_mutex buffer_mutex;

static int intbuffer[max_counter];  // store results for <int,int> parallel pipeline test
static bool check_intbuffer;

static void* buffers[n_buffers];
static bool buf_available[n_buffers];
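// buffers[] and buf_available[] form a small fixed-size pool, guarded by buffer_mutex, from which
// the pointer-specialized filters placement-new their tokens.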

void *fetchNextBuffer() {
    tbb::spin_mutex::scoped_lock sl1(buffer_mutex);
    for(size_t icnt = 0; icnt < n_buffers; ++icnt) {
        if(buf_available[icnt]) {
            buf_available[icnt] = false;
            return buffers[icnt];
        }
    }
    ASSERT(0, "Ran out of buffers");
    return 0;
}
void freeBuffer(void *buf) {
    for(size_t i=0; i < n_buffers; ++i) {
        if(buffers[i] == buf) {
            buf_available[i] = true;
            return;
        }
    }
    ASSERT(0, "Tried to free a buffer not in our list");
}

template<typename T>
class free_on_scope_exit {
public:
    free_on_scope_exit(T *p) : my_p(p) {}
    ~free_on_scope_exit() { if(!my_p) return; my_p->~T(); freeBuffer(my_p); }
private:
    T *my_p;
};
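// Illustrative use (a sketch of the pattern used by the pointer filters below): construct the guard
// first thing in the filter body so the incoming buffer is reclaimed on every return path:
//     U* operator()(T* p) const {
//         free_on_scope_exit<T> guard(p);  // runs ~T() and freeBuffer(p) when the body exits; NULL is ignored
//         ...
//     }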

#include "harness_checktype.h"

// Helper overloads for testing check_type<>; for all other types they return values that keep the
// downstream assertions satisfied.
template<typename T>
bool middle_is_ready(T &/*p*/) { return false; }

template<typename U>
bool middle_is_ready(check_type<U> &p) { return p.is_ready(); }

template<typename T>
bool output_is_ready(T &/*p*/) { return true; }

template<typename U>
bool output_is_ready(check_type<U> &p) { return p.is_ready(); }

template<typename T>
int middle_my_id( T &/*p*/) { return 0; }

template<typename U>
int middle_my_id(check_type<U> &p) { return p.my_id(); }

template<typename T>
int output_my_id( T &/*p*/) { return 1; }

template<typename U>
int output_my_id(check_type<U> &p) { return p.my_id(); }

template<typename T>
void my_function(T &p) { p = 0; }

template<typename U>
void my_function(check_type<U> &p) { p.function(); }
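// The output_filter assertions below expect a token to report a non-zero id and to be "ready" by the
// time it leaves the middle filter; for check_type<> tokens, my_function() (check_type::function())
// is what establishes that state, while for plain types the generic overloads above return benign values.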

// Filter bodies must be copy-constructible and must work when const-qualified (operator() is const).
template<typename U>
class input_filter : Harness::NoAfterlife {
public:
    U operator()( tbb::flow_control& control ) const {
        AssertLive();
        if( --input_counter < 0 ) {
            control.stop();
        }
        else  // only count successful reads
            ++non_pointer_specialized_calls;
        return U();  // default constructed
    }

};
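// Note: once control.stop() has been called, parallel_pipeline discards the value returned by the
// input filter, which is why only successful reads are counted above.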

// specialization for pointer
template<typename U>
class input_filter<U*> : Harness::NoAfterlife {
public:
    U* operator()(tbb::flow_control& control) const {
        AssertLive();
        int ival = --input_counter;
        if(ival < 0) {
            control.stop();
            return NULL;
        }
        ++pointer_specialized_calls;
        if(ival == max_counter / 2) {
            return NULL;  // deliberately emit a NULL token without stopping the pipeline
        }
        U* myReturn = new(fetchNextBuffer()) U();
        return myReturn;
    }
};
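// The NULL returned at ival == max_counter/2 is a real token: it travels through the downstream
// pointer filters, which must therefore tolerate NULL (see their if(my_storage) / if(c) checks).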

template<>
class input_filter<void> : Harness::NoAfterlife {
public:
    void operator()( tbb::flow_control& control ) const {
        AssertLive();
        if( --input_counter < 0 ) {
            control.stop();
        }
        else
            ++non_pointer_specialized_calls;
    }

};

// specialization for int that passes back a sequence of integers
template<>
class input_filter<int> : Harness::NoAfterlife {
public:
    int operator()(tbb::flow_control& control ) const {
        AssertLive();
        int oldval = --input_counter;
        if( oldval < 0 ) {
            control.stop();
        }
        else
            ++non_pointer_specialized_calls;
        return oldval+1;
    }
};

template<typename T, typename U>
class middle_filter : Harness::NoAfterlife {
public:
    U operator()(T t) const {
        AssertLive();
        ASSERT(!middle_my_id(t), "bad id value");
        ASSERT(!middle_is_ready(t), "Already ready");
        U out;
        my_function(out);
        ++non_pointer_specialized_calls;
        return out;
    }
};

template<typename T, typename U>
class middle_filter<T*,U> : Harness::NoAfterlife {
public:
    U operator()(T* my_storage) const {
        free_on_scope_exit<T> my_ptr(my_storage);  // free_on_scope_exit marks the buffer available
        AssertLive();
        if(my_storage) {  // may have been passed in a NULL
            ASSERT(!middle_my_id(*my_storage), "bad id value");
            ASSERT(!middle_is_ready(*my_storage), "Already ready");
        }
        ++first_pointer_specialized_calls;
        U out;
        my_function(out);
        return out;
    }
};

template<typename T, typename U>
class middle_filter<T,U*> : Harness::NoAfterlife {
public:
    U* operator()(T my_storage) const {
        AssertLive();
        ASSERT(!middle_my_id(my_storage), "bad id value");
        ASSERT(!middle_is_ready(my_storage), "Already ready");
        // allocate new space from buffers
        U* my_return = new(fetchNextBuffer()) U();
        my_function(*my_return);
        ++second_pointer_specialized_calls;
        return my_return;
    }
};

template<typename T, typename U>
class middle_filter<T*,U*> : Harness::NoAfterlife {
public:
    U* operator()(T* my_storage) const {
        free_on_scope_exit<T> my_ptr(my_storage);  // free_on_scope_exit marks the buffer available
        AssertLive();
        ++pointer_specialized_calls;
        if(!my_storage) return NULL;  // may have been passed a NULL token
        ASSERT(!middle_my_id(*my_storage), "bad id value");
        ASSERT(!middle_is_ready(*my_storage), "Already ready");
        U* my_return = new(fetchNextBuffer()) U();
        my_function(*my_return);
        return my_return;
    }
};

// specialization for int that squares the input and returns that.
template<>
class middle_filter<int,int> : Harness::NoAfterlife {
public:
    int operator()(int my_input) const {
        AssertLive();
        ++non_pointer_specialized_calls;
        return my_input*my_input;
    }
};
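// Together with output_filter<int> below, this makes the <int,int> pipeline deposit the squares of
// 1..max_counter into intbuffer[], which checkCounters() verifies when check_intbuffer is set.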

// ---------------------------------
template<typename T>
class output_filter : Harness::NoAfterlife {
public:
    void operator()(T c) const {
        AssertLive();
        ASSERT(output_my_id(c), "unset id value");
        ASSERT(output_is_ready(c), "not yet ready");
        ++non_pointer_specialized_calls;
        output_counter++;
    }
};

// specialization for int that puts the received value in an array
template<>
class output_filter<int> : Harness::NoAfterlife {
public:
    void operator()(int my_input) const {
        AssertLive();
        ++non_pointer_specialized_calls;
        int myindx = output_counter++;
        intbuffer[myindx] = my_input;
    }
};

template<typename T>
class output_filter<T*> : Harness::NoAfterlife {
public:
    void operator()(T* c) const {
        free_on_scope_exit<T> my_ptr(c);
        AssertLive();
        if(c) {
            ASSERT(output_my_id(*c), "unset id value");
            ASSERT(output_is_ready(*c), "not yet ready");
        }
        output_counter++;
        ++pointer_specialized_calls;
    }
};

typedef enum {
    no_pointer_counts,
    assert_nonpointer,
    assert_firstpointer,
    assert_secondpointer,
    assert_allpointer
} final_assert_type;
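// What checkCounters() verifies for each value:
//   assert_nonpointer    - only the non-pointer filter specializations ran (and, for <int,int>,
//                          all the squares are present in intbuffer)
//   assert_firstpointer  - pointer input, first-pointer middle, and non-pointer output filters ran
//   assert_secondpointer - non-pointer input, second-pointer middle, and pointer output filters ran
//   assert_allpointer    - only the pointer filter specializations ran
//   no_pointer_counts    - only the overall token count is checked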

void resetCounters() {
    output_counter = 0;
    input_counter = max_counter;
    non_pointer_specialized_calls = 0;
    pointer_specialized_calls = 0;
    first_pointer_specialized_calls = 0;
    second_pointer_specialized_calls = 0;
    // The buffer-availability flags must be reset as well: an input filter may hand back allocated
    // space at end-of-input (a default-constructed object is returned after eof), and since that
    // object never travels through the rest of the pipeline its buffer would stay marked in-use.
    for(size_t i = 0; i < n_buffers; ++i)
        buf_available[i] = true;
}

void checkCounters(final_assert_type my_t) {
    ASSERT(output_counter == max_counter, "not all tokens were passed through pipeline");
    switch(my_t) {
    case assert_nonpointer:
        ASSERT(pointer_specialized_calls+first_pointer_specialized_calls+second_pointer_specialized_calls == 0, "non-pointer filters specialized to pointer");
        ASSERT(non_pointer_specialized_calls == 3*max_counter, "bad count for non-pointer filters");
        if(check_intbuffer) {
            for(int i = 1; i <= max_counter; ++i) {
                int j = i*i;
                bool found_val = false;
                for(int k = 0; k < max_counter; ++k) {
                    if(intbuffer[k] == j) {
                        found_val = true;
                        break;
                    }
                }
                ASSERT(found_val, "Missing value in output array");
            }
        }
        break;
    case assert_firstpointer:
        ASSERT(pointer_specialized_calls == max_counter &&   // input filter extra invocation
               first_pointer_specialized_calls == max_counter &&
               non_pointer_specialized_calls == max_counter &&
               second_pointer_specialized_calls == 0, "incorrect specialization for firstpointer");
        break;
    case assert_secondpointer:
        ASSERT(pointer_specialized_calls == max_counter &&
               first_pointer_specialized_calls == 0 &&
               non_pointer_specialized_calls == max_counter &&   // input filter
               second_pointer_specialized_calls == max_counter, "incorrect specialization for secondpointer");
        break;
    case assert_allpointer:
        ASSERT(non_pointer_specialized_calls+first_pointer_specialized_calls+second_pointer_specialized_calls == 0, "pointer filters specialized to non-pointer");
        ASSERT(pointer_specialized_calls == 3*max_counter, "bad count for pointer filters");
        break;
    case no_pointer_counts:
        break;
    }
}

static const tbb::filter::mode filter_table[] = { tbb::filter::parallel, tbb::filter::serial_in_order, tbb::filter::serial_out_of_order };
const unsigned number_of_filter_types = sizeof(filter_table)/sizeof(filter_table[0]);

typedef tbb::filter_t<void, void> filter_chain;
typedef tbb::filter::mode mode_array;
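// filter_t objects compose with operator&: filter_t<A,B> & filter_t<B,C> yields filter_t<A,C>, so a
// complete chain has type filter_t<void,void> (aliased to filter_chain above).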

// The filters are passed by value, which forces temporary copies to be created. This reproduces
// the bug where a filter_chain kept references to its filters, which after this call would be
// references to already-destroyed temporaries.
template<typename type1, typename type2>
void fill_chain( filter_chain &my_chain, mode_array *filter_type, input_filter<type1> i_filter,
                 middle_filter<type1, type2> m_filter, output_filter<type2> o_filter ) {
    my_chain = tbb::make_filter<void, type1>(filter_type[0], i_filter) &
               tbb::make_filter<type1, type2>(filter_type[1], m_filter) &
               tbb::make_filter<type2, void>(filter_type[2], o_filter);
}

void run_function_spec() {
    ASSERT(!filter_node_count, NULL);
    REMARK("Testing < void, void > (single filter in pipeline)");
#if __TBB_CPP11_LAMBDAS_PRESENT
    REMARK(" ( + lambdas)");
#endif
    REMARK("\n");
    input_filter<void> i_filter;
    // Test a pipeline that contains only one filter
    for( unsigned i = 0; i<number_of_filter_types; i++) {
        tbb::filter_t<void, void> one_filter( filter_table[i], i_filter );
        ASSERT(filter_node_count==1, "some filter nodes left after previous iteration?");
        resetCounters();
        tbb::parallel_pipeline( n_tokens, one_filter );
        // no need to check counters
#if __TBB_CPP11_LAMBDAS_PRESENT
        tbb::atomic<int> counter;
        counter = max_counter;
        // Construct the filter with a lambda at the point where parallel_pipeline() is invoked
        tbb::parallel_pipeline( n_tokens,
            tbb::make_filter<void, void>(filter_table[i], [&counter]( tbb::flow_control& control ) {
                    if( counter-- == 0 )
                        control.stop();
                }
            )
        );
#endif
    }
    ASSERT(!filter_node_count, "filter_node objects leaked");
}

template<typename t1, typename t2>
void run_filter_set(
        input_filter<t1>& i_filter,
        middle_filter<t1,t2>& m_filter,
        output_filter<t2>& o_filter,
        mode_array *filter_type,
        final_assert_type my_t) {
    tbb::filter_t<void, t1> filter1( filter_type[0], i_filter );
    tbb::filter_t<t1, t2> filter2( filter_type[1], m_filter );
    tbb::filter_t<t2, void> filter3( filter_type[2], o_filter );
    ASSERT(filter_node_count==3, "some filter nodes left after previous iteration?");
    resetCounters();
    // Build the filter sequence at the parallel_pipeline() call site
    tbb::parallel_pipeline( n_tokens, filter1 & filter2 & filter3 );
    checkCounters(my_t);

    // Build the filter sequence partly outside of and partly at the parallel_pipeline() call site
    tbb::filter_t<void, t2> filter12;
    filter12 = filter1 & filter2;
    resetCounters();
    tbb::parallel_pipeline( n_tokens, filter12 & filter3 );
    checkCounters(my_t);

    tbb::filter_t<void, void> filter123 = filter12 & filter3;
    // Run the pipeline twice with the same filter sequence
    for( unsigned i = 0; i<2; i++ ) {
        resetCounters();
        tbb::parallel_pipeline( n_tokens, filter123 );
        checkCounters(my_t);
    }

    // Copy-construct another filter_t instance and use it to run the pipeline
    {
        tbb::filter_t<void, void> copy123( filter123 );
        resetCounters();
        tbb::parallel_pipeline( n_tokens, copy123 );
        checkCounters(my_t);
    }

    // Construct the filters and build the sequence at the parallel_pipeline() call site
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1>(filter_type[0], i_filter) &
        tbb::make_filter<t1, t2>(filter_type[1], m_filter) &
        tbb::make_filter<t2, void>(filter_type[2], o_filter) );
    checkCounters(my_t);

    // Construct filters, make a copy, destroy the original filters, and run with the copy
    int cnt = filter_node_count;
    {
        tbb::filter_t<void, void>* p123 = new tbb::filter_t<void,void> (
            tbb::make_filter<void, t1>(filter_type[0], i_filter) &
            tbb::make_filter<t1, t2>(filter_type[1], m_filter) &
            tbb::make_filter<t2, void>(filter_type[2], o_filter) );
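        // Presumably 5 = 3 leaf nodes from the make_filter calls plus 2 composite nodes from the two
        // '&' compositions; the exact breakdown is an implementation detail of filter_t.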
        ASSERT(filter_node_count==cnt+5, "filter node accounting error?");
        tbb::filter_t<void, void> copy123( *p123 );
        delete p123;
        ASSERT(filter_node_count==cnt+5, "filter nodes deleted prematurely?");
        resetCounters();
        tbb::parallel_pipeline( n_tokens, copy123 );
        checkCounters(my_t);
    }

    // construct a filter with temporaries
    {
        tbb::filter_t<void, void> my_filter;
        fill_chain<t1,t2>( my_filter, filter_type, i_filter, m_filter, o_filter );
        resetCounters();
        tbb::parallel_pipeline( n_tokens, my_filter );
        checkCounters(my_t);
    }
    ASSERT(filter_node_count==cnt, "scope ended but filter nodes not deleted?");
}

#if __TBB_CPP11_LAMBDAS_PRESENT
template <typename t1, typename t2>
void run_lambdas_test( mode_array *filter_type ) {
    tbb::atomic<int> counter;
    counter = max_counter;
    // Construct the filters with lambdas and build the sequence at the parallel_pipeline() call site
    resetCounters();  // only need the output_counter reset
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1 {
                if( --counter < 0 )
                    control.stop();
                return t1(); }
        ) &
        tbb::make_filter<t1, t2>(filter_type[1], []( t1 /*my_storage*/ ) -> t2 {
                return t2(); }
        ) &
        tbb::make_filter<t2, void>(filter_type[2], [] ( t2 ) -> void {
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);  // don't have to worry about specializations
    counter = max_counter;
    // pointer filters
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1*>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1* {
                if( --counter < 0 ) {
                    control.stop();
                    return NULL;
                }
                return new(fetchNextBuffer()) t1(); }
        ) &
        tbb::make_filter<t1*, t2*>(filter_type[1], []( t1* my_storage ) -> t2* {
                tbb::tbb_allocator<t1>().destroy(my_storage);  // my_storage->~t1();
                return new(my_storage) t2(); }
        ) &
        tbb::make_filter<t2*, void>(filter_type[2], [] ( t2* my_storage ) -> void {
                tbb::tbb_allocator<t2>().destroy(my_storage);  // my_storage->~t2();
                freeBuffer(my_storage);
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);
    // first filter outputs a pointer
    counter = max_counter;
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1*>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1* {
                if( --counter < 0 ) {
                    control.stop();
                    return NULL;
                }
                return new(fetchNextBuffer()) t1(); }
        ) &
        tbb::make_filter<t1*, t2>(filter_type[1], []( t1* my_storage ) -> t2 {
                tbb::tbb_allocator<t1>().destroy(my_storage);  // my_storage->~t1();
                freeBuffer(my_storage);
                return t2(); }
        ) &
        tbb::make_filter<t2, void>(filter_type[2], [] ( t2 /*my_storage*/) -> void {
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);
    // second filter outputs a pointer
    counter = max_counter;
    resetCounters();
    tbb::parallel_pipeline( n_tokens,
        tbb::make_filter<void, t1>(filter_type[0], [&counter]( tbb::flow_control& control ) -> t1 {
                if( --counter < 0 ) {
                    control.stop();
                }
                return t1(); }
        ) &
        tbb::make_filter<t1, t2*>(filter_type[1], []( t1 /*my_storage*/ ) -> t2* {
                return new(fetchNextBuffer()) t2(); }
        ) &
        tbb::make_filter<t2*, void>(filter_type[2], [] ( t2* my_storage) -> void {
                tbb::tbb_allocator<t2>().destroy(my_storage);  // my_storage->~t2();
                freeBuffer(my_storage);
                output_counter++; }
        )
    );
    checkCounters(no_pointer_counts);
}
#endif

template<typename type1, typename type2>
void run_function(const char *l1, const char *l2) {
    ASSERT(!filter_node_count, NULL);
    REMARK("Testing < %s, %s >", l1, l2);
#if __TBB_CPP11_LAMBDAS_PRESENT
    REMARK(" ( + lambdas)");
#endif
    check_intbuffer = (!strcmp(l1,"int") && !strcmp(l2,"int"));
    if(check_intbuffer) REMARK(", check output of filters");
    REMARK("\n");

    Check<type1> check1;  // check constructions/destructions
    Check<type2> check2;  // (meaningful when type1 or type2 is check_type<T>)

    const size_t number_of_filters = 3;

    input_filter<type1> i_filter;
    input_filter<type1*> p_i_filter;

    middle_filter<type1, type2> m_filter;
    middle_filter<type1*, type2> pr_m_filter;
    middle_filter<type1, type2*> rp_m_filter;
    middle_filter<type1*, type2*> pp_m_filter;

    output_filter<type2> o_filter;
    output_filter<type2*> p_o_filter;

    // allocate the buffers for the filters
    unsigned max_size = (sizeof(type1) > sizeof(type2)) ? sizeof(type1) : sizeof(type2);
    for(unsigned i = 0; i < (unsigned)n_buffers; ++i) {
        buffers[i] = malloc(max_size);
        buf_available[i] = true;
    }
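    // max_size is the larger of the two token sizes because the same pool buffers get placement-new'ed
    // as type1 by some filters and as type2 by others.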

    unsigned limit = 1;
    // Test pipeline that contains number_of_filters filters
    for( unsigned i=0; i<number_of_filters; ++i)
        limit *= number_of_filter_types;
    // Iterate over possible filter sequences
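    // (numeral is read as a three-digit base-number_of_filter_types number; each digit selects the
    // mode of one filter, so every combination of filter modes is exercised)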
    for( unsigned numeral=0; numeral<limit; ++numeral ) {
        unsigned temp = numeral;
        tbb::filter::mode filter_type[number_of_filters];
        for( unsigned i=0; i<number_of_filters; ++i, temp/=number_of_filter_types )
            filter_type[i] = filter_table[temp%number_of_filter_types];

        run_filter_set<type1,type2>(i_filter, m_filter, o_filter, filter_type, assert_nonpointer);
        run_filter_set<type1*,type2>(p_i_filter, pr_m_filter, o_filter, filter_type, assert_firstpointer);
        run_filter_set<type1,type2*>(i_filter, rp_m_filter, p_o_filter, filter_type, assert_secondpointer);
        run_filter_set<type1*,type2*>(p_i_filter, pp_m_filter, p_o_filter, filter_type, assert_allpointer);

#if __TBB_CPP11_LAMBDAS_PRESENT
        run_lambdas_test<type1,type2>(filter_type);
#endif
    }
    ASSERT(!filter_node_count, "filter_node objects leaked");

    for(unsigned i = 0; i < (unsigned)n_buffers; ++i) {
        free(buffers[i]);
    }
}

#include "tbb/task_scheduler_init.h"

int TestMain() {
#if TBB_USE_DEBUG
    // Report which token types the pipeline treats as "large objects" (based on their size and copyability).
    REMARK("is_large_object<int>::value=%d\n", tbb::interface6::internal::is_large_object<int>::value);
    REMARK("is_large_object<double>::value=%d\n", tbb::interface6::internal::is_large_object<double>::value);
    REMARK("is_large_object<int *>::value=%d\n", tbb::interface6::internal::is_large_object<int *>::value);
    REMARK("is_large_object<check_type<int> >::value=%d\n", tbb::interface6::internal::is_large_object<check_type<int> >::value);
    REMARK("is_large_object<check_type<int>* >::value=%d\n", tbb::interface6::internal::is_large_object<check_type<int>* >::value);
    REMARK("is_large_object<check_type<short> >::value=%d\n\n", tbb::interface6::internal::is_large_object<check_type<short> >::value);
#endif
    // Test with varying number of threads.
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        // Initialize the TBB task scheduler
        REMARK("\nTesting with nthread=%d\n", nthread);
        tbb::task_scheduler_init init(nthread);

        // Run the test several times with different types
        run_function_spec();
        run_function<size_t,int>("size_t", "int");
        run_function<int,double>("int", "double");
        run_function<size_t,double>("size_t", "double");
        run_function<size_t,bool>("size_t", "bool");
        run_function<int,int>("int", "int");
        run_function<check_type<unsigned int>,size_t>("check_type<unsigned int>", "size_t");
        run_function<check_type<unsigned short>,size_t>("check_type<unsigned short>", "size_t");
        run_function<check_type<unsigned int>, check_type<unsigned int> >("check_type<unsigned int>", "check_type<unsigned int>");
        run_function<check_type<unsigned int>, check_type<unsigned short> >("check_type<unsigned int>", "check_type<unsigned short>");
        run_function<check_type<unsigned short>, check_type<unsigned short> >("check_type<unsigned short>", "check_type<unsigned short>");
        run_function<double, check_type<unsigned short> >("double", "check_type<unsigned short>");
    }
    return Harness::Done;
}