/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbb/tbb_config.h"
#include "harness.h"

#if __TBB_GCC_STRICT_ALIASING_BROKEN
    #pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif

#if __TBB_TASK_GROUP_CONTEXT

#include "tbb/task.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/atomic.h"
#include <cstdlib>
#include <cstring> // for memset() used in PrepareGlobals()

#if _MSC_VER && __TBB_NO_IMPLICIT_LINKAGE
// Works around __TBB_NO_IMPLICIT_LINKAGE. __TBB_LIB_NAME should be defined (in makefiles).
#pragma comment(lib, __TBB_STRING(__TBB_LIB_NAME))
#endif

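// Test parameters: each leaf task spins for NumIterations, every inner node
// spawns NumLeafTasks children, and the depth of the task trees built by the
// two master threads varies between MinBaseDepth and MaxBaseDepth.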
const int NumIterations = 100;
const int NumLeafTasks = 2;
int MinBaseDepth = 8;
int MaxBaseDepth = 10;
int BaseDepth = 0;

const int DesiredNumThreads = 12;

const int NumTests = 8;
int TestSwitchBetweenMastersRepeats = 4;

int g_NumMasters = 0;
volatile intptr_t *g_LeavesExecuted = NULL;

int g_TestFailures[NumTests];
int g_CurConfig = 0;

int P = 0;

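// When the library is built without task priorities, provide a stub
// priority_t so that the test still compiles (priority calls are compiled out).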
#if !__TBB_TASK_PRIORITY
namespace tbb {
    enum priority_t {
        priority_low = 0,
        priority_normal = 1,
        priority_high = 2
    };
}
#endif /* __TBB_TASK_PRIORITY */

tbb::priority_t Low,
                High;
int PreemptionActivatorId = 1;

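// Test mode flags: TestPreemption makes one master raise its tree priority to
// preempt the other; in the Flog modes inner nodes repeatedly toggle a task
// group's priority between Low and High instead.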
enum Options {
    NoPriorities = 0,
    TestPreemption = 1,
    Flog = 2,
    FlogEncloser = Flog | 4
};

const char *PriorityName(tbb::priority_t p) {
    if( p == tbb::priority_high ) return "high";
    if( p == tbb::priority_normal ) return "normal";
    if( p == tbb::priority_low ) return "low";
    return "bad";
}

void PrepareGlobals ( int numMasters ) {
    ASSERT( !g_NumMasters && !g_LeavesExecuted, NULL );
    g_NumMasters = numMasters;
    if ( !g_LeavesExecuted )
        g_LeavesExecuted = new intptr_t[numMasters];
    g_CurConfig = 0;
    memset( const_cast<intptr_t*>(g_LeavesExecuted), 0, sizeof(intptr_t) * numMasters );
    memset( g_TestFailures, 0, sizeof(int) * NumTests );
}

void ClearGlobals () {
    ASSERT( g_LeavesExecuted, NULL );
    delete [] g_LeavesExecuted;
    g_LeavesExecuted = NULL;
    g_NumMasters = 0;
    REMARK("\r \r");
}

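// Leaf of the task trees: does a fixed amount of dummy work and atomically
// counts itself in g_LeavesExecuted[m_tid]. In TestPreemption mode the
// preemption-activator master switches its still-normal-priority tree to
// High (activator id 1) or Low (activator id 0) once master 0 has executed
// more than P leaves.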
class LeafTask : public tbb::task {
    int m_tid;
    uintptr_t m_opts;

    tbb::task* execute () __TBB_override {
        volatile int anchor = 0;
        for ( int i = 0; i < NumIterations; ++i )
            anchor += i;
        __TBB_FetchAndAddW(g_LeavesExecuted + m_tid, 1);
#if __TBB_TASK_PRIORITY
        ASSERT( !m_opts || (m_opts & Flog) || (!(m_opts & TestPreemption) ^ (m_tid == PreemptionActivatorId)), NULL );
        if ( (m_opts & TestPreemption) && g_LeavesExecuted[0] > P && group_priority() == tbb::priority_normal ) {
            ASSERT( m_tid == PreemptionActivatorId, NULL );
            ASSERT( (PreemptionActivatorId == 1 ? High > tbb::priority_normal : Low < tbb::priority_normal), NULL );
            set_group_priority( PreemptionActivatorId == 1 ? High : Low );
        }
#endif /* __TBB_TASK_PRIORITY */
        return NULL;
    }
public:
    LeafTask ( int tid, uintptr_t opts ) : m_tid(tid), m_opts(opts) {
        ASSERT( tid < g_NumMasters, NULL );
    }
};

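// Common part of inner tree nodes: spawns NumLeafTasks children (deeper nodes
// or, at depth 1, leaves). Depending on the options a node occasionally either
// yields or, in the Flog modes, toggles a task group's priority between Low
// and High.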
template<class NodeType>
class NodeTask : public tbb::task {
protected:
    int m_tid;
    int m_depth;
    uintptr_t m_opts;
    task *m_root;

    void SpawnChildren ( task* parent_node ) {
        ASSERT( m_depth > 0, NULL );
        if ( g_LeavesExecuted[m_tid] % (100 / m_depth) == 0 ) {
            if ( m_opts & Flog ) {
#if __TBB_TASK_PRIORITY
                task *r = m_opts & FlogEncloser ? this : m_root;
                tbb::priority_t p = r->group_priority();
                r->set_group_priority( p == Low ? High : Low );
#endif /* __TBB_TASK_PRIORITY */
            }
            else
                __TBB_Yield();
        }
        parent_node->set_ref_count(NumLeafTasks + 1);
        --m_depth;
        for ( int i = 0; i < NumLeafTasks; ++i ) {
            task *t = m_depth ? (task*) new(parent_node->allocate_child()) NodeType(m_tid, m_depth, m_opts, m_root)
                              : (task*) new(parent_node->allocate_child()) LeafTask(m_tid, m_opts);
            task::spawn(*t);
        }
    }

public:
    NodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL )
        : m_tid(tid), m_depth(_depth), m_opts(opts), m_root(r)
    {}
};

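// Node that opens a nested bound task_group_context with a local root and
// blocks in wait_for_all() until its subtree completes.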
class NestedGroupNodeTask : public NodeTask<NestedGroupNodeTask> {
    task* execute () __TBB_override {
        tbb::task_group_context ctx; // Use bound context
        tbb::empty_task &r = *new( task::allocate_root(ctx) ) tbb::empty_task;
        SpawnChildren(&r);
        r.wait_for_all();
        task::destroy(r);
        return NULL;
    }
public:
    NestedGroupNodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL )
        : NodeTask<NestedGroupNodeTask>(tid, _depth, opts, r)
    {}
};

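// Node that spawns its children and blocks in wait_for_all() until they complete.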
class BlockingNodeTask : public NodeTask<BlockingNodeTask> {
    task* execute () __TBB_override {
        SpawnChildren(this);
        wait_for_all();
        return NULL;
    }
public:
    BlockingNodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL )
        : NodeTask<BlockingNodeTask>(tid, _depth, opts, r) {}
};

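// Node that recycles itself as a safe continuation instead of blocking;
// the second pass through execute() (marked by m_depth == -1) is a no-op.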
class NonblockingNodeTask : public NodeTask<NonblockingNodeTask> {
    task* execute () __TBB_override {
        if ( m_depth < 0 )
            return NULL; // I'm just a continuation now
        recycle_as_safe_continuation();
        SpawnChildren(this);
        m_depth = -1;
        return NULL;
    }
public:
    NonblockingNodeTask ( int tid, int _depth, uintptr_t opts, task *r = NULL )
        : NodeTask<NonblockingNodeTask>(tid, _depth, opts, r)
    {}
};

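// Per-master-thread test driver. RunTaskForest() builds a forest of R
// repetitions of P task trees in an isolated context (P-1 trees of depth
// MinBaseDepth+id plus one tree of depth BaseDepth+id), then spins until all
// of them complete. With priorities enabled, a master that observes no
// progress enqueues an auxiliary task to request workers back from RML.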
template<class NodeType>
class MasterBodyBase : NoAssign, Harness::NoAfterlife {
protected:
    uintptr_t m_opts;

public:
    void RunTaskForest ( int id ) const {
        ASSERT( id < g_NumMasters, NULL );
        g_LeavesExecuted[id] = 0;
        int d = BaseDepth + id;
        tbb::task_scheduler_init init(P-1);
        tbb::task_group_context ctx (tbb::task_group_context::isolated);
        tbb::empty_task &r = *new( tbb::task::allocate_root(ctx) ) tbb::empty_task;
        const int R = 4;
        r.set_ref_count( R * P + 1 );
        // Only the PreemptionActivator thread changes its task tree priority in preemption test mode
        const uintptr_t opts = (id == PreemptionActivatorId) ? m_opts : (m_opts & ~(uintptr_t)TestPreemption);
        for ( int i = 0; i < R; ++i ) {
            for ( int j = 1; j < P; ++j )
                r.spawn( *new(r.allocate_child()) NodeType(id, MinBaseDepth + id, opts, &r) );
            r.spawn( *new(r.allocate_child()) NodeType(id, d, opts, &r) );
        }
        int count = 1;
        intptr_t lastExecuted = 0;
        while ( r.ref_count() > 1 ) {
            // Give workers time to make some progress.
            for ( int i = 0; i < 10 * count; ++i )
                __TBB_Yield();
#if __TBB_TASK_PRIORITY
            if ( lastExecuted == g_LeavesExecuted[id] ) {
                // No progress. Likely all workers left to a higher priority arena,
                // and then returned to RML. Request workers back from RML.
                tbb::task::enqueue( *new(tbb::task::allocate_root()) tbb::empty_task, id == 0 ? Low : High );
                Harness::Sleep(count);
#if __TBB_ipf
                // Increased sleep periods are required on systems with unfair hyperthreading (Itanium(R) 2 processor)
                count += 10;
#endif
            }
            else {
                count = 1;
                lastExecuted = g_LeavesExecuted[id];
            }
#else /* !__TBB_TASK_PRIORITY */
            (void)lastExecuted;
            tbb::task::enqueue( *new(tbb::task::allocate_root()) tbb::empty_task );
#endif /* !__TBB_TASK_PRIORITY */
        }
        ASSERT( g_LeavesExecuted[id] == R * ((1 << d) + ((P - 1) * (1 << (MinBaseDepth + id)))), NULL );
        g_LeavesExecuted[id] = -1;
        tbb::task::destroy(r);
    }

    MasterBodyBase ( uintptr_t opts ) : m_opts(opts) {}
};

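// Wraps RunTaskForest() and records expected-order violations: in preemption
// mode the boosted master (id 1) should finish before the normal-priority
// master 0; without priorities the master with the shallower forest (id 0)
// should finish before master 1.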
template<class NodeType>
class MasterBody : public MasterBodyBase<NodeType> {
    int m_testIndex;
public:
    void operator() ( int id ) const {
        this->RunTaskForest(id);
        if ( this->m_opts & Flog )
            return;
        if ( this->m_opts & TestPreemption ) {
            if ( id == 1 && g_LeavesExecuted[0] == -1 ) {
                //REMARK( "Warning: Low priority master finished too early [depth %d]\n", Depth );
                ++g_TestFailures[m_testIndex];
            }
        }
        else {
            if ( id == 0 && g_LeavesExecuted[1] == -1 ) {
                //REMARK( "Warning: Faster master takes too long [depth %d]\n", Depth );
                ++g_TestFailures[m_testIndex];
            }
        }
    }

    MasterBody ( int idx, uintptr_t opts ) : MasterBodyBase<NodeType>(opts), m_testIndex(idx) {}
};

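// Runs one test configuration: two native master threads, each executing
// MasterBody with the given test index and option bits.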
template<class NodeType>
void RunPrioritySwitchBetweenTwoMasters ( int idx, uintptr_t opts ) {
    ASSERT( idx < NumTests, NULL );
    REMARK( "Config %d: idx=%i, opts=%u\r", ++g_CurConfig, idx, (unsigned)opts );
    NativeParallelFor ( 2, MasterBody<NodeType>(idx, opts) );
    Harness::Sleep(50);
}

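// Stress test: repeatedly runs pairs of masters over a range of tree depths
// and node types. With priorities enabled, any test index that misbehaves in
// more than half of its runs is reported (blocking-style nesting is a known
// limitation).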
void TestPrioritySwitchBetweenTwoMasters () {
    if ( P > DesiredNumThreads ) {
        REPORT_ONCE( "Known issue: TestPrioritySwitchBetweenTwoMasters is skipped for big number of threads\n" );
        return;
    }
    tbb::task_scheduler_init init; // keeps the market alive to reduce the amount of TBB warnings
    REMARK( "Stress tests: %s / %s \n", Low == tbb::priority_low ? "Low" : "Normal", High == tbb::priority_normal ? "Normal" : "High" );
    PrepareGlobals( 2 );
    for ( int i = 0; i < TestSwitchBetweenMastersRepeats; ++i ) {
        for ( BaseDepth = MinBaseDepth; BaseDepth <= MaxBaseDepth; ++BaseDepth ) {
            RunPrioritySwitchBetweenTwoMasters<BlockingNodeTask>( 0, NoPriorities );
            RunPrioritySwitchBetweenTwoMasters<BlockingNodeTask>( 1, TestPreemption );
            RunPrioritySwitchBetweenTwoMasters<NonblockingNodeTask>( 2, NoPriorities );
            RunPrioritySwitchBetweenTwoMasters<NonblockingNodeTask>( 3, TestPreemption );
            if ( i == 0 ) {
                RunPrioritySwitchBetweenTwoMasters<BlockingNodeTask>( 4, Flog );
                RunPrioritySwitchBetweenTwoMasters<NonblockingNodeTask>( 5, Flog );
                RunPrioritySwitchBetweenTwoMasters<NestedGroupNodeTask>( 6, Flog );
                RunPrioritySwitchBetweenTwoMasters<NestedGroupNodeTask>( 7, FlogEncloser );
            }
        }
    }
#if __TBB_TASK_PRIORITY
    const int NumRuns = TestSwitchBetweenMastersRepeats * (MaxBaseDepth - MinBaseDepth + 1);
    for ( int i = 0; i < NumTests; ++i ) {
        if ( g_TestFailures[i] )
            REMARK( "Test %d: %d failures in %d runs\n", i, g_TestFailures[i], NumRuns );
        if ( g_TestFailures[i] * 100 / NumRuns > 50 ) {
            if ( i == 1 )
                REPORT_ONCE( "Known issue: priority effect is limited in case of blocking-style nesting\n" );
            else
                REPORT( "Warning: test %d misbehaved too often (%d out of %d)\n", i, g_TestFailures[i], NumRuns );
        }
    }
#endif /* __TBB_TASK_PRIORITY */
    ClearGlobals();
}

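// Root task that spawns a single empty child and waits for it.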
class SingleChildRootTask : public tbb::task {
    tbb::task* execute () __TBB_override {
        set_ref_count(2);
        spawn ( *new(allocate_child()) tbb::empty_task );
        wait_for_all();
        return NULL;
    }
};

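// Smoke test for basic task operations in a context with the given priority:
// waiting for a child, re-executing an empty root, and spawn_root_and_wait()
// on a root that has a single child.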
int TestSimplePriorityOps ( tbb::priority_t prio ) {
    tbb::task_scheduler_init init;
    tbb::task_group_context ctx;
#if __TBB_TASK_PRIORITY
    ctx.set_priority( prio );
#else /* !__TBB_TASK_PRIORITY */
    (void)prio;
#endif /* !__TBB_TASK_PRIORITY */
    tbb::task *r = new( tbb::task::allocate_root(ctx) ) tbb::empty_task;
    r->set_ref_count(2);
    r->spawn ( *new(r->allocate_child()) tbb::empty_task );
    REMARK( "TestSimplePriorityOps: waiting for a child\n" );
    r->wait_for_all();
    ASSERT( !r->ref_count(), NULL );
| 353 | REMARK( "TestLowPriority: executing an empty root\n" ); |
| 354 | tbb::task::spawn_root_and_wait(*r); |
| 355 | r = new( tbb::task::allocate_root(ctx) ) SingleChildRootTask; |
| 356 | REMARK( "TestLowPriority: executing a root with a single child\n" ); |
    tbb::task::spawn_root_and_wait(*r);
    return 0;
}

#include "tbb/parallel_for.h"

void EmulateWork( int ) {
    for ( int i = 0; i < 1000; ++i )
        __TBB_Yield();
}

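// Two native threads repeatedly run parallel_for loops in contexts of
// different priorities (Low for thread 0, High for thread 1); mode 0 uses
// long loops with more repetitions, mode 1 a short variant for restricted
// concurrency.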
class PeriodicActivitiesBody {
public:
    static const int parallelIters[2];
    static const int seqIters[2];
    static int mode;
    void operator() ( int id ) const {
        tbb::task_group_context ctx;
#if __TBB_TASK_PRIORITY
        ctx.set_priority( id ? High : Low );
#else /* !__TBB_TASK_PRIORITY */
        (void)id;
#endif /* !__TBB_TASK_PRIORITY */
        for ( int i = 0; i < seqIters[mode]; ++i ) {
            tbb::task_scheduler_init init;
            tbb::parallel_for( 1, parallelIters[mode], &EmulateWork, ctx );
        }
    }
};

const int PeriodicActivitiesBody::parallelIters[] = {10000, 100};
const int PeriodicActivitiesBody::seqIters[] = {5, 2};
int PeriodicActivitiesBody::mode = 0;

void TestPeriodicConcurrentActivities () {
    REMARK( "TestPeriodicConcurrentActivities: %s / %s \n", Low == tbb::priority_low ? "Low" : "Normal", High == tbb::priority_normal ? "Normal" : "High" );
    NativeParallelFor ( 2, PeriodicActivitiesBody() );
}


#include "harness_bad_expr.h"

void TestPriorityAssertions () {
#if TRY_BAD_EXPR_ENABLED && __TBB_TASK_PRIORITY
    REMARK( "TestPriorityAssertions\n" );
    tbb::task_scheduler_init init; // to avoid autoinit that'd affect subsequent tests
    tbb::priority_t bad_low_priority = tbb::priority_t( tbb::priority_low - 1 ),
                    bad_high_priority = tbb::priority_t( tbb::priority_high + 1 );
    tbb::task_group_context ctx;
    // Catch assertion failures
    tbb::set_assertion_handler( AssertionFailureHandler );
    TRY_BAD_EXPR( ctx.set_priority( bad_low_priority ), "Invalid priority level value" );
    tbb::task &t = *new( tbb::task::allocate_root() ) tbb::empty_task;
    TRY_BAD_EXPR( tbb::task::enqueue( t, bad_high_priority ), "Invalid priority level value" );
    // Restore normal assertion handling
    tbb::set_assertion_handler( ReportError );
#endif /* TRY_BAD_EXPR_ENABLED && __TBB_TASK_PRIORITY */
}

#if __TBB_TASK_PRIORITY

tbb::atomic<tbb::priority_t> g_order;
tbb::atomic<bool> g_order_established;
tbb::atomic<int> g_num_tasks;
tbb::atomic<bool> g_all_tasks_enqueued;
int g_failures;
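// Enqueued task that records the priority of the previously executed ordered
// task and checks the expected execution order: after an initial transition
// up to high priority, the tasks must drain in high -> normal -> low order.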
class OrderedTask : public tbb::task {
    tbb::priority_t my_priority;
public:
    OrderedTask(tbb::priority_t p) : my_priority(p) {
        ++g_num_tasks;
    }
    tbb::task* execute() __TBB_override {
        tbb::priority_t prev = g_order.fetch_and_store(my_priority);
        if( my_priority != prev ) {
            REMARK("prev:%s --> new:%s\n", PriorityName(prev), PriorityName(my_priority));
            // TODO: improve the test for concurrent workers
            if( !g_order_established ) {
                // initial transition path allowed: low->[normal]->high
                if( my_priority == tbb::priority_high )
                    g_order_established = true;
                else ASSERT(my_priority == tbb::priority_normal && prev == tbb::priority_low, NULL);
            } else { // transition path allowed: high->normal->low
                bool fail = prev==tbb::priority_high && my_priority!=tbb::priority_normal; // previous priority is high - bad order
                fail |= prev==tbb::priority_normal && my_priority!=tbb::priority_low; // previous priority is normal - bad order
                fail |= prev==tbb::priority_low; // transition from low priority, but not during initialization
                if ( fail ) {
                    if ( g_all_tasks_enqueued )
                        REPORT_ONCE( "ERROR: Bad order: prev = %s, my_priority = %s\n", PriorityName( prev ), PriorityName( my_priority ) );
                    ++g_failures;
                }
            }
        }
        EmulateWork(0);
        --g_num_tasks;
        return NULL;
    }
    static void start(int i) {
        tbb::priority_t p = i%3==0 ? tbb::priority_low : (i%3==1 ? tbb::priority_normal : tbb::priority_high);
        OrderedTask &t = *new(tbb::task::allocate_root()) OrderedTask(p);
        tbb::task::enqueue(t, p);
    }
};

// Look for discussion of the issue at http://software.intel.com/en-us/forums/showthread.php?t=102159
void TestEnqueueOrder () {
    REMARK("Testing order of enqueued tasks\n");
    tbb::task_scheduler_init init(1); // to simplify transition checks, only one extra worker for enqueue
    g_order = tbb::priority_low;
    g_order_established = false;
    g_all_tasks_enqueued = false;
    g_failures = 0;
    for( int i = 0; i < 1000; i++ )
        OrderedTask::start(i);
    if ( int curr_num_tasks = g_num_tasks ) {
        // Sync with the worker so as not to set g_all_tasks_enqueued too early.
        while ( curr_num_tasks == g_num_tasks ) __TBB_Yield();
    }
    g_all_tasks_enqueued = true;
    while( g_order == tbb::priority_low && g_num_tasks > 0 ) __TBB_Yield();
    while( g_order != tbb::priority_low && g_num_tasks > 0 ) __TBB_Yield();
    // We cannot tell whether such misbehavior is caused by the test or by the implementation.
    // However, we do not promise mandatory priorities, so misbehavior in less
    // than 1% of cases can be considered our best effort.
    ASSERT( g_failures < 5, "Too many failures" );
}

namespace test_propagation {

// This test creates two binary trees of task_group_context objects.
// Indices in a binary tree have the following layout:
//  [1]--> [2] -> [4],[5]
//     \-> [3] -> [6],[7]
static const int first = 1, last = 7;
tbb::task_group_context* g_trees[2][/*last+1*/8];
tbb::task_group_context* g_default_ctx;
tbb::atomic<int> g_barrier;
tbb::atomic<bool> is_finished;

class TestSetPriorityTask : public tbb::task {
    const int m_tree, m_i;
public:
    TestSetPriorityTask(int t, int i) : m_tree(t), m_i(i) {}
    tbb::task* execute() __TBB_override {
        if( !m_i ) { // the first task creates the two trees
            g_default_ctx = group();
            for( int i = 0; i <= 1; ++i ) {
                g_trees[i][1] = new tbb::task_group_context( tbb::task_group_context::isolated );
                tbb::task::spawn(*new(tbb::task::allocate_root(*g_trees[i][1])) TestSetPriorityTask(i, 1));
            }
        }
        else if( m_i <= last/2 ) { // the node has children
            for( int i = 0; i <= 1; ++i ) {
                const int index = 2*m_i + i;
                g_trees[m_tree][index] = new tbb::task_group_context( tbb::task_group_context::bound );
                tbb::task::spawn(*new(tbb::task::allocate_root(*g_trees[m_tree][index])) TestSetPriorityTask(m_tree, index));
            }
        }
        --g_barrier;
        //REMARK("Task %i executing\n", m_i);
        while ( !is_finished ) __TBB_Yield();
        change_group(*g_default_ctx); // avoid races with destruction of the custom contexts
        --g_barrier;
        return NULL;
    }
};

// Tests task_group_context state propagation, also for cancellation.
void TestSetPriority() {
    REMARK("Testing set_priority() with existing forest\n");
    const int workers = last*2+1; // +1 is the worker thread executing the first task
    const int max_workers = 4*tbb::task_scheduler_init::default_num_threads();
    if ( workers+1 > max_workers ) {
        REPORT( "Known issue: TestSetPriority requires %d threads but due to 4P hard limit the maximum number of threads is %d\n", workers+1, max_workers );
        return;
    }
    tbb::task_scheduler_init init(workers+1); // +1 is the master thread
    g_barrier = workers;
    is_finished = false;
    tbb::task::spawn(*new(tbb::task::allocate_root()) TestSetPriorityTask(0,0));
    while( g_barrier ) __TBB_Yield();
    g_trees[0][2]->set_priority(tbb::priority_high);
    g_trees[0][4]->set_priority(tbb::priority_normal);
    g_trees[1][3]->set_priority(tbb::priority_high); // Regression test: it must not set priority_high to g_trees[0][4]
    //                                         -  1  2  3  4  5  6  7
    const int expected_priority[2][last+1] = {{0, 0, 1, 0, 0, 1, 0, 0},
                                              {0, 0, 0, 1, 0, 0, 1, 1}};
    for (int t = 0; t < 2; ++t)
        for (int i = first; i <= last; ++i) {
            REMARK("\r \rTask %i... ", i);
            ASSERT(g_trees[t][i]->priority() == (expected_priority[t][i] ? tbb::priority_high : tbb::priority_normal), NULL);
            REMARK("OK");
        }
    REMARK("\r \r");
    REMARK("Also testing cancel_group_execution()\n"); // cancellation shares propagation logic with set_priority() but there are also differences
    g_trees[0][4]->cancel_group_execution();
    g_trees[0][5]->cancel_group_execution();
    g_trees[1][3]->cancel_group_execution();
    //                                             -  1  2  3  4  5  6  7
    const int expected_cancellation[2][last+1] = {{0, 0, 0, 0, 1, 1, 0, 0},
                                                  {0, 0, 0, 1, 0, 0, 1, 1}};
    for (int t = 0; t < 2; ++t)
        for (int i = first; i <= last; ++i) {
            REMARK("\r \rTask %i... ", i);
            ASSERT( g_trees[t][i]->is_group_execution_cancelled() == (expected_cancellation[t][i]==1), NULL );
            REMARK("OK");
        }
    REMARK("\r \r");
    g_barrier = workers;
    is_finished = true;
    REMARK("waiting for the tasks to terminate\n");
    while( g_barrier ) __TBB_Yield();
    for (int t = 0; t < 2; ++t)
        for (int i = first; i <= last; ++i)
            delete g_trees[t][i];
}
} //namespace test_propagation

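// Body of the outer parallel_for: each invocation runs a nested parallel_for
// at high priority with its own affinity_partitioner.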
struct OuterParFor {
    void operator()(int) const {
        tbb::affinity_partitioner ap;
        tbb::task_group_context ctx;
        ctx.set_priority(tbb::priority_high);
        tbb::parallel_for(0, 100, Harness::DummyBody(1000), ap, ctx);
    }
};

// Test priorities with affinity tasks.
void TestAffinityTasks() {
    REMARK("Test priorities with affinity tasks\n");
    tbb::task_scheduler_init init;
    tbb::affinity_partitioner ap;
    for (int i = 0; i < 10; ++i)
        tbb::parallel_for(0, 100, OuterParFor(), ap);
}

namespace regression {
// This is a regression test for a bug with task_group_context used from a thread
// that created its local scheduler but not the implicit arena.
class TestTGContext {
public:
    void operator() (int) const {
        tbb::task_group_context ctx;
        ctx.cancel_group_execution(); // initializes the local weak scheduler on the thread
        ctx.set_priority(tbb::priority_high);
    }
};

void TestTGContextOnNewThread() {
    REMARK("Testing a regression for a bug with task_group_context\n");
    TestTGContext body;
    NativeParallelFor(1, body);
}
} //namespace regression
#endif /* __TBB_TASK_PRIORITY */

#if !__TBB_TEST_SKIP_AFFINITY
#include "harness_concurrency.h"
#endif

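// Runs the whole suite for the current Low/High settings; it is invoked with
// different priority pairs and preemption-activator ids. The stress tests
// need at least 3 threads.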
int RunTests () {
#if __TBB_TASK_PRIORITY
    TestEnqueueOrder();
#endif /* __TBB_TASK_PRIORITY */
    TestPriorityAssertions();
    TestSimplePriorityOps(tbb::priority_low);
    TestSimplePriorityOps(tbb::priority_high);
    P = tbb::task_scheduler_init::default_num_threads();
    REMARK( "The number of threads: %d\n", P );
    if ( P < 3 )
        return Harness::Skipped;
    Low = tbb::priority_normal;
    High = tbb::priority_high;
    TestPeriodicConcurrentActivities();
    TestPrioritySwitchBetweenTwoMasters();
    Low = tbb::priority_low;
    High = tbb::priority_normal;
    PreemptionActivatorId = 0;
    TestPeriodicConcurrentActivities();
    TestPrioritySwitchBetweenTwoMasters();
    High = tbb::priority_high;
    TestPeriodicConcurrentActivities();
    TestPrioritySwitchBetweenTwoMasters();
    PreemptionActivatorId = 1;
    TestPrioritySwitchBetweenTwoMasters();
    TestAffinityTasks();
    regression::TestTGContextOnNewThread();

    return Harness::Done;
}

#include "tbb/global_control.h"

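// Entry point: limits thread affinity to DesiredNumThreads, runs the suite at
// full concurrency, then reruns it in a reduced mode with global_control
// limiting parallelism to 1.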
int TestMain () {
#if !__TBB_TEST_SKIP_AFFINITY
    Harness::LimitNumberOfThreads( DesiredNumThreads );
#endif
#if !__TBB_TASK_PRIORITY
    REMARK( "Priorities disabled: Running as just yet another task scheduler test\n" );
#else
    test_propagation::TestSetPriority(); // TODO: move down when bug 1996 is fixed
#endif /* __TBB_TASK_PRIORITY */

    RunTests();
    tbb::global_control c(tbb::global_control::max_allowed_parallelism, 1);
    PeriodicActivitiesBody::mode = 1;
    TestSwitchBetweenMastersRepeats = 1;
    return RunTests();
}

#else /* !__TBB_TASK_GROUP_CONTEXT */

int TestMain () {
    return Harness::Skipped;
}

#endif /* !__TBB_TASK_GROUP_CONTEXT */