/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbb/compat/condition_variable"
#include "tbb/mutex.h"
#include "tbb/recursive_mutex.h"
#include "tbb/tick_count.h"
#include "tbb/atomic.h"

#include <stdexcept>

#include "harness.h"

#if TBB_IMPLEMENT_CPP0X
// This test deliberately avoids a "using tbb" statement,
// so that the error of putting types in the wrong namespace will be caught.
using namespace std;
#else
using namespace tbb::interface5;
#endif

#if __TBB_CPP11_RVALUE_REF_PRESENT
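// Verifies that unique_lock's move constructor and move assignment transfer both
// mutex ownership and the locked/unlocked state for mutex type M, covering every
// combination of locked and unlocked source and destination locks.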
template<typename M>
void TestUniqueLockMoveConstructorAndAssignOp(){
    typedef unique_lock<M> unique_lock_t;

    static const bool locked = true;
    static const bool unlocked = false;

    struct Locked{
        bool value;
        Locked(bool a_value) : value(a_value) {}
    };

    typedef Locked destination;
    typedef Locked source;

    struct MutexAndLockFixture{
        M mutex;
        unique_lock_t lock;
        const bool was_locked;

        MutexAndLockFixture(source lckd_src) : lock(mutex), was_locked(lckd_src.value){
            if (!lckd_src.value) lock.unlock();
            ASSERT(was_locked == lock.owns_lock(), "unlock() should have released the mutex");
        }
    };

    struct TestCases{
        const char* filename;
        int line;

        TestCases(const char* a_filename, int a_line) : filename(a_filename), line(a_line) {}

        void TestMoveConstructor(source locked_src){
            MutexAndLockFixture src(locked_src);
            unique_lock_t dst_lock(std::move(src.lock));
            AssertOwnershipWasTransferred(dst_lock, src.lock, src.was_locked, &src.mutex);
        }

        void TestMoveAssignment(source locked_src, destination locked_dest){
            MutexAndLockFixture src(locked_src);
            MutexAndLockFixture dst(locked_dest);

            dst.lock = std::move(src.lock);
            ASSERT_CUSTOM(unique_lock_t(dst.mutex, try_to_lock).owns_lock(), "unique_lock should release the owned mutex on assignment", filename, line);
            AssertOwnershipWasTransferred(dst.lock, src.lock, src.was_locked, &src.mutex);
        }

        void AssertOwnershipWasTransferred(unique_lock_t const& dest_lock, unique_lock_t const& src_lck, const bool was_locked, const M* mutex) {
            ASSERT_CUSTOM(dest_lock.owns_lock() == was_locked, "moved-to lock object should have the same state as the source had before the move", filename, line);
            ASSERT_CUSTOM(dest_lock.mutex() == mutex, "moved-to lock object should refer to the source's mutex", filename, line);
            ASSERT_CUSTOM(src_lck.owns_lock() == false, "moved-from lock object must not be left locked", filename, line);
            ASSERT_CUSTOM(src_lck.mutex() == NULL, "moved-from lock object must not hold a mutex", filename, line);
        }
    };
//TODO: rework this with an assertion binder
#define AT_LOCATION() TestCases( __FILE__, __LINE__)

    AT_LOCATION().TestMoveConstructor(source(locked));
    AT_LOCATION().TestMoveAssignment (source(locked), destination(locked));
    AT_LOCATION().TestMoveAssignment (source(locked), destination(unlocked));
    AT_LOCATION().TestMoveConstructor(source(unlocked));
    AT_LOCATION().TestMoveAssignment (source(unlocked), destination(locked));
    AT_LOCATION().TestMoveAssignment (source(unlocked), destination(unlocked));

#undef AT_LOCATION

}
#endif //__TBB_CPP11_RVALUE_REF_PRESENT

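// Shared counter protected by a mutex of type M; each flog_once_* member
// increments 'value' exactly once, using the locking protocol selected by 'mode'.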
template<typename M>
struct Counter {
    typedef M mutex_type;
    M mutex;
    volatile long value;
    void flog_once_lock_guard( size_t mode );
    void flog_once_unique_lock( size_t mode );
};

template<typename M>
void Counter<M>::flog_once_lock_guard(size_t mode)
/** Increments counter once for each iteration in the iteration space. */
{
    if( mode&1 ) {
        // Acquire and release with an implicit lock_guard.
        // Precondition: if mutex_type is not a recursive mutex, the calling thread does not own the mutex.
        // If the precondition is not met, either a deadlock or an incorrect 'value' would result.
        lock_guard<M> lg(mutex);
        value = value+1;
    } else {
        // Acquire and release with an adopting lock_guard.
        // Precondition: the calling thread owns the mutex.
        // If the precondition is not met, an incorrect 'value' would result because the thread
        // unlocks a mutex that it does not own.
        mutex.lock();
        lock_guard<M> lg( mutex, adopt_lock );
        value = value+1;
    }
}

template<typename M>
void Counter<M>::flog_once_unique_lock(size_t mode)
/** Increments counter once for each iteration in the iteration space. */
{
    switch( mode&7 ) {
    case 0:
        {// implicitly acquire and release mutex with unique_lock
            unique_lock<M> ul( mutex );
            value = value+1;
            ASSERT( ul==true, NULL );
        }
        break;
    case 1:
        {// unique_lock with defer_lock
            unique_lock<M> ul( mutex, defer_lock );
            ASSERT( ul.owns_lock()==false, NULL );
            ul.lock();
            value = value+1;
            ASSERT( ul.owns_lock()==true, NULL );
        }
        break;
    case 2:
        {// unique_lock::try_lock() with try_to_lock
            unique_lock<M> ul( mutex, try_to_lock );
            if( !ul )
                while( !ul.try_lock() )
                    __TBB_Yield();
            value = value+1;
        }
        break;
    case 3:
        {// unique_lock::try_lock_for() with defer_lock
            unique_lock<M> ul( mutex, defer_lock );
            tbb::tick_count::interval_t i(1.0);
            while( !ul.try_lock_for( i ) )
                ;
            value = value+1;
            ASSERT( ul.owns_lock()==true, NULL );
        }
        break;
    case 4:
        {
            unique_lock<M> ul_o4;
            {// unique_lock with adopt_lock, ownership moved out via free swap()
                mutex.lock();
                unique_lock<M> ul( mutex, adopt_lock );
                value = value+1;
                ASSERT( ul.owns_lock()==true, NULL );
                ASSERT( ul.mutex()==&mutex, NULL );
                ASSERT( ul_o4.owns_lock()==false, NULL );
                ASSERT( ul_o4.mutex()==NULL, NULL );
                swap( ul, ul_o4 );
                ASSERT( ul.owns_lock()==false, NULL );
                ASSERT( ul.mutex()==NULL, NULL );
                ASSERT( ul_o4.owns_lock()==true, NULL );
                ASSERT( ul_o4.mutex()==&mutex, NULL );
                ul_o4.unlock();
            }
            ASSERT( ul_o4.owns_lock()==false, NULL );
        }
        break;
    case 5:
        {
            unique_lock<M> ul_o5;
            {// unique_lock with adopt_lock, ownership moved out via member swap()
                mutex.lock();
                unique_lock<M> ul( mutex, adopt_lock );
                value = value+1;
                ASSERT( ul.owns_lock()==true, NULL );
                ASSERT( ul.mutex()==&mutex, NULL );
                ASSERT( ul_o5.owns_lock()==false, NULL );
                ASSERT( ul_o5.mutex()==NULL, NULL );
                ul_o5.swap( ul );
                ASSERT( ul.owns_lock()==false, NULL );
                ASSERT( ul.mutex()==NULL, NULL );
                ASSERT( ul_o5.owns_lock()==true, NULL );
                ASSERT( ul_o5.mutex()==&mutex, NULL );
                ul_o5.unlock();
            }
            ASSERT( ul_o5.owns_lock()==false, NULL );
        }
        break;
    default:
        {// unique_lock with adopt_lock, and release()
            mutex.lock();
            unique_lock<M> ul( mutex, adopt_lock );
            ASSERT( ul==true, NULL );
            value = value+1;
            M* old_m = ul.release();
            old_m->unlock();
            ASSERT( ul.owns_lock()==false, NULL );
        }
        break;
    }
}

static tbb::atomic<size_t> Order;

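// Body for NativeParallelFor: worker threads carve the iteration space into chunks
// via the shared atomic 'Order' and exercise both the lock_guard and unique_lock paths.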
template<typename State, long TestSize>
struct WorkForLocks: NoAssign {
    static const size_t chunk = 100;
    State& state;
    WorkForLocks( State& state_ ) : state(state_) {}
    void operator()( int ) const {
        size_t step;
        while( (step=Order.fetch_and_add<tbb::acquire>(chunk))<TestSize ) {
            for( size_t i=0; i<chunk && step<TestSize; ++i, ++step ) {
                state.flog_once_lock_guard(step);
                state.flog_once_unique_lock(step);
            }
        }
    }
};

template<typename M>
void TestLocks( const char* name, int nthread ) {
    REMARK("testing %s in TestLocks\n",name);
    Counter<M> counter;
    counter.value = 0;
    Order = 0;
    // use the macro because of a gcc 4.6 bug
#define TEST_SIZE 100000
    NativeParallelFor( nthread, WorkForLocks<Counter<M>, TEST_SIZE>(counter) );

    if( counter.value!=2*TEST_SIZE )
        REPORT("ERROR for %s in TestLocks: counter.value=%ld != 2*%d=test_size\n",name,counter.value,TEST_SIZE);
#undef TEST_SIZE
}

static tbb::atomic<int> barrier;

// Test if the constructor works and if native_handle() works
template<typename M>
struct WorkForCondVarCtor: NoAssign {
    condition_variable& my_cv;
    M& my_mtx;
    WorkForCondVarCtor( condition_variable& cv_, M& mtx_ ) : my_cv(cv_), my_mtx(mtx_) {}
    void operator()( int tid ) const {
        ASSERT( tid<=1, NULL ); // test with 2 threads.
        condition_variable::native_handle_type handle = my_cv.native_handle();
        if( tid&1 ) {
            my_mtx.lock();
            ++barrier;
#if _WIN32||_WIN64
            if( !tbb::interface5::internal::internal_condition_variable_wait( *handle, &my_mtx ) ) {
                int ec = GetLastError();
                ASSERT( ec!=WAIT_TIMEOUT, NULL );
                throw_exception( tbb::internal::eid_condvar_wait_failed );
            }
#else
            if( pthread_cond_wait( handle, my_mtx.native_handle() ) )
                throw_exception( tbb::internal::eid_condvar_wait_failed );
#endif
            ++barrier;
            my_mtx.unlock();
        } else {
            bool res;
            while( (res=my_mtx.try_lock())==true && barrier==0 ) {
                my_mtx.unlock();
                __TBB_Yield();
            }
            if( res ) my_mtx.unlock();
            do {
#if _WIN32||_WIN64
                tbb::interface5::internal::internal_condition_variable_notify_one( *handle );
#else
                pthread_cond_signal( handle );
#endif
                __TBB_Yield();
            } while ( barrier<2 );
        }
    }
};

static condition_variable* test_cv;
static tbb::atomic<int> n_waiters;

// Test if the destructor works
template<typename M>
struct WorkForCondVarDtor: NoAssign {
    int nthread;
    M& my_mtx;
    WorkForCondVarDtor( int n, M& mtx_ ) : nthread(n), my_mtx(mtx_) {}
    void operator()( int tid ) const {
        if( tid==0 ) {
            unique_lock<M> ul( my_mtx, defer_lock );
            test_cv = new condition_variable;

            while( n_waiters<nthread-1 )
                __TBB_Yield();
            ul.lock();
            test_cv->notify_all();
            ul.unlock();
            while( n_waiters>0 )
                __TBB_Yield();
            delete test_cv;
        } else {
            while( test_cv==NULL )
                __TBB_Yield();
            unique_lock<M> ul(my_mtx);
            ++n_waiters;
            test_cv->wait( ul );
            --n_waiters;
        }
    }
};

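// Tunables shared by the wait/notify stress tests below: the number of tickets
// handed out and the yield counts used to give waiters time to block on the queue.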
static const int max_ticket = 100;
static const int short_delay = 10;
static const int long_delay = 100;

tbb::atomic<int> n_signaled;
tbb::atomic<int> n_done, n_done_1, n_done_2;
tbb::atomic<int> n_timed_out;

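// Flag flipped by the notifying thread; TestPredicateFalseToTrue observes it so that
// predicated waits initially block and later succeed once notify_all() is issued.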
static bool false_to_true;

struct TestPredicateFalseToTrue {
    TestPredicateFalseToTrue() {}
    bool operator()() { return false_to_true; }
};

struct TestPredicateFalse {
    TestPredicateFalse() {}
    bool operator()() { return false; }
};

struct TestPredicateTrue {
    TestPredicateTrue() {}
    bool operator()() { return true; }
};

// Test timed wait and timed wait with pred
template<typename M>
struct WorkForCondVarTimedWait: NoAssign {
    int nthread;
    condition_variable& test_cv;
    M& my_mtx;
    WorkForCondVarTimedWait( int n_, condition_variable& cv_, M& mtx_ ) : nthread(n_), test_cv(cv_), my_mtx(mtx_) {}
    void operator()( int tid ) const {
        tbb::tick_count t1, t2;

        unique_lock<M> ul( my_mtx, defer_lock );

        ASSERT( n_timed_out==0, NULL );
        ++barrier;
        while( barrier<nthread ) __TBB_Yield();

        // test if a thread times out with wait_for()
        for( int i=1; i<10; ++i ) {
            tbb::tick_count::interval_t intv((double)i*0.0999 /*seconds*/);
            ul.lock();
            cv_status st = no_timeout;
            __TBB_TRY {
                /** Some versions of glibc return EINVAL instead of 0 when a spurious wakeup occurs on pthread_cond_timedwait() **/
                st = test_cv.wait_for( ul, intv );
            } __TBB_CATCH( std::runtime_error& ) {}
            ASSERT( ul, "mutex should have been reacquired" );
            ul.unlock();
            if( st==timeout )
                ++n_timed_out;
        }

        ASSERT( n_timed_out>0, "should have timed out at least once\n" );
        ++n_done_1;
        while( n_done_1<nthread ) __TBB_Yield();

        for( int i=1; i<10; ++i ) {
            tbb::tick_count::interval_t intv((double)i*0.0001 /*seconds*/);
            ul.lock();
            __TBB_TRY {
                /** Some versions of glibc return EINVAL instead of 0 when a spurious wakeup occurs on pthread_cond_timedwait() **/
                ASSERT( false==test_cv.wait_for( ul, intv, TestPredicateFalse()), "incorrect return value" );
            } __TBB_CATCH( std::runtime_error& ) {}
            ASSERT( ul, "mutex should have been reacquired" );
            ul.unlock();
        }

        if( tid==0 )
            n_waiters = 0;
        // barrier
        ++n_done_2;
        while( n_done_2<nthread ) __TBB_Yield();

        // at this point, we know wait_for() successfully times out.
        // so test if a thread blocked on wait_for() can receive a signal before its waiting time elapses.
        if( tid==0 ) {
            // signaler
            n_signaled = 0;
            ASSERT( n_waiters==0, NULL );
            ++n_done_2; // open gate 1

            while( n_waiters<(nthread-1) ) __TBB_Yield(); // wait until all other threads block on the cv. flag_1

            ul.lock();
            test_cv.notify_all();
            n_waiters = 0;
            ul.unlock();

            while( n_done_2<2*nthread ) __TBB_Yield();
            ASSERT( n_signaled>0, "too small an interval?" );
            n_signaled = 0;

        } else {
            while( n_done_2<nthread+1 ) __TBB_Yield(); // gate 1

            // sleeper
            tbb::tick_count::interval_t intv((double)2.0 /*seconds*/);
            ul.lock();
            ++n_waiters; // raise flag 1/(nthread-1)
            t1 = tbb::tick_count::now();
            cv_status st = test_cv.wait_for( ul, intv ); // gate 2
            t2 = tbb::tick_count::now();
            ul.unlock();
            if( st==no_timeout ) {
                ++n_signaled;
                ASSERT( (t2-t1).seconds()<intv.seconds(), "got a signal after timing out?" );
            }
        }

        ASSERT( n_done==0, NULL );
        ++n_done_2;

        if( tid==0 ) {
            ASSERT( n_waiters==0, NULL );
            ++n_done; // open gate 3

            while( n_waiters<(nthread-1) ) __TBB_Yield(); // wait until all other threads block on the cv.
            for( int i=0; i<2*short_delay; ++i ) __TBB_Yield(); // give the waiters some time so that all of them are in the wait queue
            ul.lock();
            false_to_true = true;
            test_cv.notify_all(); // open gate 4
            ul.unlock();

            while( n_done<nthread ) __TBB_Yield(); // wait until all other threads wake up.
            ASSERT( n_signaled>0, "too small an interval?" );
        } else {

            while( n_done<1 ) __TBB_Yield(); // gate 3

            tbb::tick_count::interval_t intv((double)2.0 /*seconds*/);
            ul.lock();
            ++n_waiters;
            // wait_for with a predicate
            t1 = tbb::tick_count::now();
            ASSERT( test_cv.wait_for( ul, intv, TestPredicateFalseToTrue())==true, NULL ); // gate 4
            t2 = tbb::tick_count::now();
            ul.unlock();
            if( (t2-t1).seconds()<intv.seconds() )
                ++n_signaled;
            ++n_done;
        }
    }
};

tbb::atomic<int> ticket_for_sleep, ticket_for_wakeup, signaled_ticket, wokeup_ticket;
tbb::atomic<unsigned> n_visit_to_waitq;
unsigned max_waitq_length;

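// Wait/notify_one stress test: odd-numbered threads act as notifiers and even-numbered
// threads as waiters; atomically dispensed tickets force wakeups to be consumed in order,
// and a waiter woken out of turn passes the notification along with notify_one().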
template<typename M>
struct WorkForCondVarWaitAndNotifyOne: NoAssign {
    int nthread;
    condition_variable& test_cv;
    M& my_mtx;
    WorkForCondVarWaitAndNotifyOne( int n_, condition_variable& cv_, M& mtx_ ) : nthread(n_), test_cv(cv_), my_mtx(mtx_) {}
    void operator()( int tid ) const {
        if( tid&1 ) {
            // exercise the signal part
            while( ticket_for_wakeup<max_ticket ) {
                int my_ticket = ++ticket_for_wakeup; // atomically grab the next ticket
                if( my_ticket>max_ticket )
                    break;

                for( ;; ) {
                    unique_lock<M> ul( my_mtx, defer_lock );
                    ul.lock();
                    if( n_waiters>0 && my_ticket<=ticket_for_sleep && my_ticket==(wokeup_ticket+1) ) {
                        signaled_ticket = my_ticket;
                        test_cv.notify_one();
                        ++n_signaled;
                        ul.unlock();
                        break;
                    }
                    ul.unlock();
                    __TBB_Yield();
                }

                // give waiters time to go to sleep.
                for( int m=0; m<short_delay; ++m )
                    __TBB_Yield();
            }
        } else {
            while( ticket_for_sleep<max_ticket ) {
                unique_lock<M> ul( my_mtx, defer_lock );
                ul.lock();
                // exercise the wait part
                int my_ticket = ++ticket_for_sleep; // grab my ticket
                if( my_ticket>max_ticket ) break;

                // each waiter should go to sleep at least once
                unsigned nw = ++n_waiters;
                for( ;; ) {
                    // update max_waitq_length
                    if( nw>max_waitq_length ) max_waitq_length = nw;
                    ++n_visit_to_waitq;
                    test_cv.wait( ul );
                    // if( ret==false ) ++n_timedout;
                    ASSERT( ul, "mutex should have been locked" );
                    --n_waiters;
                    if( signaled_ticket==my_ticket ) {
                        wokeup_ticket = my_ticket;
                        break;
                    }
                    if( n_waiters>0 )
                        test_cv.notify_one();
                    nw = ++n_waiters; // the update of max_waitq_length occurs above
                }

                ul.unlock();
                __TBB_Yield(); // give other threads a chance to run.
            }
        }
        ++n_done;
        spin_wait_until_eq( n_done, nthread );
        ASSERT( n_signaled==max_ticket, "incorrect number of notifications sent" );
    }
};

struct TestPredicate1 {
    int target;
    TestPredicate1( int i_ ) : target(i_) {}
    bool operator()( ) { return signaled_ticket==target; }
};

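// Predicated-wait/notify_all stress test: odd-numbered threads wait with TestPredicate1
// on their own ticket, even-numbered threads broadcast with notify_all(); 'multiple'
// scales the delay the notifiers give waiters to reach the wait queue.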
template<typename M>
struct WorkForCondVarWaitPredAndNotifyAll: NoAssign {
    int nthread;
    condition_variable& test_cv;
    M& my_mtx;
    int multiple;
    WorkForCondVarWaitPredAndNotifyAll( int n_, condition_variable& cv_, M& mtx_, int m_ ) :
        nthread(n_), test_cv(cv_), my_mtx(mtx_), multiple(m_) {}
    void operator()( int tid ) const {
        if( tid&1 ) {
            while( ticket_for_sleep<max_ticket ) {
                unique_lock<M> ul( my_mtx, defer_lock );
                // exercise the wait part
                int my_ticket = ++ticket_for_sleep; // grab my ticket
                if( my_ticket>max_ticket )
                    break;

                ul.lock();
                ++n_visit_to_waitq;
                unsigned nw = ++n_waiters;
                if( nw>max_waitq_length ) max_waitq_length = nw;
                test_cv.wait( ul, TestPredicate1( my_ticket ) );
                wokeup_ticket = my_ticket;
                --n_waiters;
                ASSERT( ul, "mutex should have been locked" );
                ul.unlock();

                __TBB_Yield(); // give other threads a chance to run.
            }
        } else {
            // exercise the signal part
            while( ticket_for_wakeup<max_ticket ) {
                int my_ticket = ++ticket_for_wakeup; // atomically grab the next ticket
                if( my_ticket>max_ticket )
                    break;

                for( ;; ) {
                    unique_lock<M> ul( my_mtx );
                    if( n_waiters>0 && my_ticket<=ticket_for_sleep && my_ticket==(wokeup_ticket+1) ) {
                        signaled_ticket = my_ticket;
                        test_cv.notify_all();
                        ++n_signaled;
                        ul.unlock();
                        break;
                    }
                    ul.unlock();
                    __TBB_Yield();
                }

                // give waiters time to go to sleep.
                for( int m=0; m<long_delay*multiple; ++m )
                    __TBB_Yield();
            }
        }
        ++n_done;
        spin_wait_until_eq( n_done, nthread );
        ASSERT( n_signaled==max_ticket, "incorrect number of notifications sent" );
    }
};

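// Resets the shared counters used by the condition_variable tests between runs.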
void InitGlobalCounters()
{
    ticket_for_sleep = ticket_for_wakeup = signaled_ticket = wokeup_ticket = 0;
    n_waiters = 0;
    n_signaled = 0;
    n_done = n_done_1 = n_done_2 = 0;
    n_visit_to_waitq = 0;
    n_timed_out = 0;
}

template<typename M>
void TestConditionVariable( const char* name, int nthread )
{
    REMARK("testing %s in TestConditionVariable\n",name);
    Counter<M> counter;
    M mtx;

    ASSERT( nthread>1, "at least two threads are needed for testing condition_variable" );
    REMARK(" - constructor\n" );
    // Test constructor.
    {
        condition_variable cv1;
#if _WIN32||_WIN64
        condition_variable::native_handle_type handle = cv1.native_handle();
        ASSERT( uintptr_t(&handle->cv_event)==uintptr_t(&handle->cv_native), NULL );
#endif
        M mtx1;
        barrier = 0;
        NativeParallelFor( 2, WorkForCondVarCtor<M>( cv1, mtx1 ) );
    }

    REMARK(" - destructor\n" );
    // Test destructor.
    {
        M mtx2;
        test_cv = NULL;
        n_waiters = 0;
        NativeParallelFor( nthread, WorkForCondVarDtor<M>( nthread, mtx2 ) );
    }

    REMARK(" - timed_wait (i.e., wait_for)\n");
    // Test timed wait.
    {
        condition_variable cv_tw;
        M mtx_tw;
        barrier = 0;
        InitGlobalCounters();
        int nthr = nthread>4?4:nthread;
        NativeParallelFor( nthr, WorkForCondVarTimedWait<M>( nthr, cv_tw, mtx_tw ) );
    }

    REMARK(" - wait with notify_one\n");
    // Test wait and notify_one
    do {
        condition_variable cv3;
        M mtx3;
        InitGlobalCounters();
        NativeParallelFor( nthread, WorkForCondVarWaitAndNotifyOne<M>( nthread, cv3, mtx3 ) );
    } while( n_visit_to_waitq==0 || max_waitq_length==0 );

    REMARK(" - predicated wait with notify_all\n");
    // Test wait_pred and notify_all
    int delay_multiple = 1;
    do {
        condition_variable cv4;
        M mtx4;
        InitGlobalCounters();
        NativeParallelFor( nthread, WorkForCondVarWaitPredAndNotifyAll<M>( nthread, cv4, mtx4, delay_multiple ) );
        if( max_waitq_length<unsigned(nthread/2) )
            ++delay_multiple;
    } while( n_visit_to_waitq==0 || max_waitq_length<unsigned(nthread/2) );
}

#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
static tbb::atomic<int> err_count;

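// Runs 'op' expecting a std::runtime_error whose message contains 'msg';
// any other outcome (no exception, or a different exception) bumps err_count.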
#define TRY_AND_CATCH_RUNTIME_ERROR(op,msg) \
    try { \
        op; \
        ++err_count; \
    } catch( std::runtime_error& e ) {ASSERT( strstr(e.what(), msg) , NULL );} catch(...) {++err_count;}

template<typename M>
void TestUniqueLockException( const char * name ) {
    REMARK("testing %s in TestUniqueLockException\n",name);
    M mtx;
    unique_lock<M> ul_0;
    err_count = 0;

    TRY_AND_CATCH_RUNTIME_ERROR( ul_0.lock(), "Operation not permitted" );
    TRY_AND_CATCH_RUNTIME_ERROR( ul_0.try_lock(), "Operation not permitted" );

    unique_lock<M> ul_1( mtx );

    TRY_AND_CATCH_RUNTIME_ERROR( ul_1.lock(), "Resource deadlock" );
    TRY_AND_CATCH_RUNTIME_ERROR( ul_1.try_lock(), "Resource deadlock" );

    ul_1.unlock();
    TRY_AND_CATCH_RUNTIME_ERROR( ul_1.unlock(), "Operation not permitted" );

    ASSERT( !err_count, "Some exceptions are not thrown or incorrect ones are thrown" );
}

template<typename M>
void TestConditionVariableException( const char * name ) {
    REMARK("testing %s in TestConditionVariableException; yet to be implemented\n",name);
}
#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN */

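// Driver: runs the unique_lock move tests (when rvalue references are available),
// the lock tests for both mutex types across the requested thread range, the
// condition_variable tests, and the exception tests when exceptions are usable.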
template<typename Mutex, typename RecursiveMutex>
void DoCondVarTest()
{
#if __TBB_CPP11_RVALUE_REF_PRESENT
    TestUniqueLockMoveConstructorAndAssignOp<Mutex>();
    TestUniqueLockMoveConstructorAndAssignOp<RecursiveMutex>();
#endif

    for( int p=MinThread; p<=MaxThread; ++p ) {
        REMARK( "testing with %d threads\n", p );
        TestLocks<Mutex>( "mutex", p );
        TestLocks<RecursiveMutex>( "recursive_mutex", p );

        if( p<=1 ) continue;

        // for testing condition_variable, at least one sleeper and one notifier are needed
        TestConditionVariable<Mutex>( "mutex", p );
    }
#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
    REPORT("Known issue: exception handling tests are skipped.\n");
#elif TBB_USE_EXCEPTIONS
    TestUniqueLockException<Mutex>( "mutex" );
    TestUniqueLockException<RecursiveMutex>( "recursive_mutex" );
    TestConditionVariableException<Mutex>( "mutex" );
#endif /* TBB_USE_EXCEPTIONS */
}

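// A minimal usage sketch (an assumption about the surrounding test, not part of this
// header): a test translation unit that includes this file would typically define the
// harness entry point along the lines of
//
//   int TestMain() {
//       DoCondVarTest<tbb::mutex, tbb::recursive_mutex>();
//       return Harness::Done;
//   }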