/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"
#include "tbb_profiling.h"
#include <climits>

typedef struct ___itt_caller *__itt_caller;

namespace tbb {

class task;
class task_list;
class task_group_context;

// MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public
#else
#define __TBB_TASK_BASE_ACCESS private
#endif

namespace internal { //< @cond INTERNAL

    class allocate_additional_child_of_proxy: no_assign {
        //! No longer used, but retained for binary layout compatibility. Always NULL.
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
            suppress_unused_warning( self );
        }
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
} //< namespace internal @endcond

namespace interface5 {
    namespace internal {
        //! Base class for methods that became static in TBB 3.0.
        /** TBB's evolution caused the "this" argument for several methods to become obsolete.
            However, for backwards binary compatibility, the new methods need distinct names,
            otherwise the One Definition Rule would be broken. Hence the new methods are
            defined in this private base class, and then exposed in class task via
            using declarations. */
        class task_base: tbb::internal::no_copy {
        __TBB_TASK_BASE_ACCESS:
            friend class tbb::task;

            //! Schedule task for execution when a worker becomes available.
            static void spawn( task& t );

            //! Spawn multiple tasks and clear list.
            static void spawn( task_list& list );

            //! Like allocate_child, except that task's parent becomes "t", not this.
            /** Typically used in conjunction with recycle_to_reexecute to implement while loops.
                Atomically increments the reference count of t.parent() */
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }

            //! Destroy a task.
            /** Usually, calling this method is unnecessary, because a task is
                implicitly deleted after its execute() method runs. However,
                sometimes a task needs to be explicitly deallocated, such as
                when a root task is used as the parent in spawn_and_wait_for_all. */
            static void __TBB_EXPORTED_FUNC destroy( task& victim );
        };
    } // internal
} // interface5

//! @cond INTERNAL
namespace internal {

    class scheduler: no_copy {
    public:
        //! For internal use only
        virtual void spawn( task& first, task*& next ) = 0;

        //! For internal use only
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! For internal use only
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor;
        //  Have to have it just to shut up overzealous compilation warnings
        virtual ~scheduler() = 0;

        //! For internal use only
        virtual void enqueue( task& t, void* reserved ) = 0;
    };

    //! A reference count
    /** Should always be non-negative. A signed type is used so that underflow can be detected. */
    typedef intptr_t reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;

#if __TBB_TASK_ISOLATION
    //! A tag for task isolation.
    typedef intptr_t isolation_tag;
    const isolation_tag no_isolation = 0;
#endif /* __TBB_TASK_ISOLATION */

#if __TBB_TASK_GROUP_CONTEXT
    class generic_scheduler;

    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif /* __TBB_TASK_GROUP_CONTEXT */

    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

#if __TBB_PREVIEW_CRITICAL_TASKS
    // TODO: move to class methods when critical task API becomes public
    void make_critical( task& t );
    bool is_critical( task& t );
#endif

    //! Memory prefix to a task object.
    /** This class is internal to the library.
        Do not reference it directly, except within the library itself.
        Fields are ordered in a way that preserves backwards compatibility and yields good packing on
        typical 32-bit and 64-bit platforms. New fields should be added at the beginning for
        backward compatibility with accesses to the task prefix inlined into application code. To
        prevent ODR violations, the class shall have the same layout in all application translation
        units. If some fields are conditional (e.g. enabled by preview macros) and might get
        skipped, use reserved fields to adjust the layout.

        If the task prefix size exceeds 32 or 64 bytes on the IA32 and Intel64 architectures
        respectively, consider setting task_alignment and task_prefix_reservation_size dynamically
        based on the maximal operand size supported by the current CPU.

        @ingroup task_scheduling */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;
#if __TBB_PREVIEW_CRITICAL_TASKS
        friend void make_critical( task& );
        friend bool is_critical( task& );
#endif

#if __TBB_TASK_ISOLATION
        //! The tag used for task isolation.
        isolation_tag isolation;
#else
        intptr_t reserved_space_for_task_isolation_tag;
#endif /* __TBB_TASK_ISOLATION */

#if __TBB_TASK_GROUP_CONTEXT
        //! Shared context that is used to communicate asynchronous state changes
        /** Currently it is used to broadcast cancellation requests generated both
            by users and as the result of unhandled exceptions in the task::execute()
            methods. */
        task_group_context *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

        //! The scheduler that allocated the task, or NULL if the task is big.
        /** Small tasks are pooled by the scheduler that allocated the task.
            If a scheduler needs to free a small task allocated by another scheduler,
            it returns the task to that other scheduler. This policy avoids
            memory space blowup issues for memory allocators that allocate from
            thread-specific pools. */
        scheduler* origin;

#if __TBB_TASK_PRIORITY
        union {
#endif /* __TBB_TASK_PRIORITY */
        //! Obsolete. The scheduler that owns the task.
        /** Retained only for the sake of backward binary compatibility.
            Still used by inline methods in the task.h header. **/
        scheduler* owner;

#if __TBB_TASK_PRIORITY
        //! Pointer to the next offloaded lower priority task.
        /** Used to maintain a list of offloaded tasks inside the scheduler. **/
        task* next_offloaded;
        };
#endif /* __TBB_TASK_PRIORITY */

        //! The task whose reference count includes me.
        /** In the "blocking style" of programming, this field points to the parent task.
            In the "continuation-passing style" of programming, this field points to the
            continuation of the parent. */
        tbb::task* parent;

        //! Reference count used for synchronization.
        /** In the "continuation-passing style" of programming, this field is
            the number of allocated children minus the number of children that
            have completed.
            In the "blocking style" of programming, this field is one more than that difference. */
        __TBB_atomic reference_count ref_count;

        //! Obsolete. Used to be scheduling depth before TBB 2.2
        /** Retained only for the sake of backward binary compatibility.
            Not used by TBB anymore. **/
        int depth;

        //! A task::state_type, stored as a byte for compactness.
        /** This state is exposed to users via method task::state(). */
        unsigned char state;

        //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
        /** 0x0 -> version 1.0 task
            0x1 -> version >=2.1 task
            0x10 -> task was enqueued
            0x20 -> task_proxy
            0x40 -> task has live ref_count
            0x80 -> a stolen task */
        unsigned char extra_state;

        affinity_id affinity;

270 //! "next" field for list of task
        tbb::task* next;

        //! The task corresponding to this task_prefix.
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal
//! @endcond

#if __TBB_TASK_GROUP_CONTEXT

#if __TBB_TASK_PRIORITY
namespace internal {
    static const int priority_stride_v4 = INT_MAX / 4;
#if __TBB_PREVIEW_CRITICAL_TASKS
    // TODO: move into priority_t enum when critical tasks become public feature
    static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2;
#endif
}

enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};

#endif /* __TBB_TASK_PRIORITY */

#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
        class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

class task_scheduler_init;
namespace interface7 { class task_arena; }
using interface7::task_arena;

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, and unhandled
    exceptions intercepted during task execution. Intercepting an exception results
    in generating an internal cancellation request (which is processed in exactly the
    same way as an external one).

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    the direction of cancellation propagation. If a task in a cancellation group is cancelled,
    all the other tasks in this group and in the groups bound to it (as children) get cancelled too.

    IMPLEMENTATION NOTE:
    When adding new members to task_group_context or changing types of existing ones,
    update the size of both padding buffers (_leading_padding and _trailing_padding)
    appropriately. See also VERSIONING NOTE at the constructor definition below. **/
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;
    friend class task_arena;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
#if __TBB_FP_CONTEXT
        fp_settings = 0x0002ul << traits_offset,
#endif
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    enum state {
        may_have_children = 1,
        // the following enumerations must be the last, new 2^x values must go above
        next_state_value, low_unused_state_bit = (next_state_value-1)*2
    };

    union {
        //! Flavor of this context: bound or isolated.
        // TODO: describe asynchronous use, and whether any memory semantics are needed
        __TBB_atomic kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread specific list of contexts without additional memory allocation.
    /** A context is included into the list of the current thread when its binding to
        its parent happens. Any context can be present in the list of one thread only. **/
    internal::context_list_node_t my_node;

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    /** Read accesses to the field my_cancellation_requested are on the hot path inside
        the scheduler. This padding ensures that this field never shares the same cache
        line with a local variable that is frequently written to. **/
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)
#if __TBB_FP_CONTEXT
                          - sizeof(internal::cpu_ctl_env_space)
#endif
                         ];

#if __TBB_FP_CONTEXT
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    internal::cpu_ctl_env_space my_cpu_ctl_env;
#endif

    //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies low 16 bits, and traits (zero or more ORed enumerators
        from the traits_type enumerations) take the next 16 bits.
        Original (zeroth) version of the context did not support any traits. **/
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler instance that registered this context in its thread specific list.
    internal::generic_scheduler *my_owner;

    //! Internal state (combination of state flags, currently only may_have_children).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    //! Priority level of the task group (in normalized representation)
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    //! Description of algorithm for scheduler based instrumentation.
    internal::string_index my_name;

    //! Trailing padding protecting accesses to frequently used members from false sharing
    /** \sa _leading_padding **/
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
                           - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
                           - sizeof(internal::string_index)
                          ];

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the task calling the task::allocate_root(this_context)
        method. Cancellation requests passed to the parent context are propagated
        to all the contexts bound to it. Similarly, a priority change is propagated
        from the parent context to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but such contexts have
        limited utility. Normally, when an exception occurs in an algorithm that has nested
        ones running, it is desirable to have all the nested algorithms cancelled
        as well. Such behavior requires nested algorithms to use bound contexts.

        There is one case where using an isolated context is beneficial: the master
        thread. That is, if a particular algorithm is invoked directly from the master
        thread (not from a TBB task), supplying it with an explicitly created isolated
        context results in faster algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of the task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object.

        Boosting the runtime version will also be necessary if new data fields are
        introduced in the currently unused padding areas and these fields are updated
        by inline methods. **/
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t t = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(3 | t)
        , my_name(internal::CUSTOM_CTX)
    {
        init();
    }
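
    /* A minimal construction sketch for the constructor above; the variable names
       are illustrative only, not part of the library API.

           // Bound context with the default traits (the common case).
           tbb::task_group_context bound_ctx;

           // Isolated context that additionally enables the concurrent_wait trait.
           tbb::task_group_context isolated_ctx(
               tbb::task_group_context::isolated,
               tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait );
    */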

    // Custom constructor for instrumentation of tbb algorithm
    task_group_context ( internal::string_index name )
        : my_kind(bound)
        , my_version_and_traits(3 | default_traits)
        , my_name(name)
    {
        init();
    }

    // Do not introduce standalone unbind method since it will break state propagation assumptions
    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not yet cancelled
        context, true will be returned by one and only one invocation. **/
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context has received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch-block. If the context is already
        cancelled, does nothing.
        The method brings the task group associated with this context exactly into
        the state it would be in, if one of its tasks threw the currently pending
        exception during its execution. In other words, it emulates the actions
        of the scheduler's dispatch loop exception handler. **/
    void __TBB_EXPORTED_METHOD register_pending_exception ();

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the FPU control settings of the context's parent. **/
    void __TBB_EXPORTED_METHOD capture_fp_settings ();
#endif

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group
    void set_priority ( priority_t );

    //! Retrieves the current priority of the task group
    priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */

    //! Returns the context's traits
    uintptr_t traits() const { return my_version_and_traits & traits_mask; }

protected:
    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of the future versions. **/
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Propagates any state change detected to *this, and as an optimisation possibly also upward along the heritage line.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );

    //! Registers this context with the local scheduler and binds it to its parent context
    void bind_to ( internal::generic_scheduler *local_sched );

    //! Registers this context with the local scheduler
    void register_with ( internal::generic_scheduler *local_sched );

#if __TBB_FP_CONTEXT
    //! Copies FPU control setting from another context
    // TODO: Consider adding #else stub in order to omit #if sections in other code
    void copy_fp_settings( const task_group_context &src );
#endif /* __TBB_FP_CONTEXT */
}; // class task_group_context
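
/* A usage sketch for cancellation with the class above; MyRootTask is an
   illustrative user-defined task type, not part of this header.

       tbb::task_group_context ctx;
       tbb::task& root = *new( tbb::task::allocate_root(ctx) ) MyRootTask;
       tbb::task::spawn_root_and_wait(root);

       // Meanwhile, from another thread or from a task inside the group:
       ctx.cancel_group_execution();            // returns false if already cancelled
       if( ctx.is_group_execution_cancelled() )
           ; // tasks in the group observe the request and stop starting new work
*/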

#endif /* __TBB_TASK_GROUP_CONTEXT */

//! Base class for user-defined tasks.
/** @ingroup task_scheduling */
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Set reference count
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes.
        executing,
        //! task to be rescheduled.
        reexecute,
        //! task is in ready pool, or is going to be put there, or was just taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on free list, or is going to be put there, or was just taken off.
        freed,
        //! task to be recycled as continuation
        recycle
#if __TBB_RECYCLE_TO_ENQUEUE
        //! task to be scheduled for starvation-resistant execution
        ,to_enqueue
#endif
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with user supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }
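
    /* A continuation-passing sketch for allocate_continuation(); FibTask and
       FibContinuation are illustrative user types, not part of this header.

           tbb::task* FibTask::execute() {
               FibContinuation& c =
                   *new( allocate_continuation() ) FibContinuation(sum);
               FibTask& a = *new( c.allocate_child() ) FibTask(n-1, &c.x);
               FibTask& b = *new( c.allocate_child() ) FibTask(n-2, &c.y);
               c.set_ref_count(2);              // two children; the continuation does not wait
               spawn(b);
               return &a;                       // returning a child bypasses the ready pool
           }
    */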

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is
        implicitly deleted after its execute() method runs. However,
        sometimes a task needs to be explicitly deallocated, such as
        when a root task is used as the parent in spawn_and_wait_for_all. */
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Define recommended static form via import from base class.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
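
    /* A blocking-style sketch tying the allocation methods together; MyTask is an
       illustrative user type.

           struct MyTask: tbb::task {
               tbb::task* execute() __TBB_override {
                   return NULL;                 // real work would go here
               }
           };

           tbb::task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
           root.set_ref_count(3);               // 2 children + 1 for the wait
           tbb::task& c1 = *new( root.allocate_child() ) MyTask;
           tbb::task& c2 = *new( root.allocate_child() ) MyTask;
           root.spawn(c1);
           root.spawn_and_wait_for_all(c2);
           tbb::task::destroy(root);            // the root is not destroyed implicitly
    */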

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's refcount does not become zero until
        after the method execute() returns. Typically, this is done by having
        method execute() return a pointer to a child of the task. If the guarantee
        cannot be made, use method recycle_as_safe_continuation instead.

        Because of the hazard, this method may be deprecated in the future. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Recommended to use, safe variant of recycle_as_continuation
    /** For safety, it requires additional increment of ref_count.
        With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }
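
    /* A recycling sketch for recycle_as_safe_continuation(), inside some task's
       execute(); ChildTask is an illustrative user type.

           tbb::task* execute() __TBB_override {
               recycle_as_safe_continuation();
               set_ref_count(2);                // one child + one extra for safety
               spawn( *new( allocate_child() ) ChildTask );
               return NULL;                     // this task acts as its own continuation
           }
    */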

    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }

    //! Schedule this for reexecution after current execute() returns.
    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

#if __TBB_RECYCLE_TO_ENQUEUE
    //! Schedule this to enqueue after descendant tasks complete.
    /** Apart from the enqueue/spawn difference, it has the semantics of recycle_as_safe_continuation. */
    void recycle_to_enqueue() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        prefix().state = to_enqueue;
    }
#endif /* __TBB_RECYCLE_TO_ENQUEUE */

    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Atomically increment reference count.
    /** Has acquire semantics */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically adds to reference count and returns its new value.
    /** Has release-acquire semantics */
    int add_ref_count( int count ) {
        internal::call_itt_notify( internal::releasing, &prefix().ref_count );
        internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
        __TBB_ASSERT( k>=0, "task's reference count underflowed" );
        if( k==0 )
            internal::call_itt_notify( internal::acquired, &prefix().ref_count );
        return int(k);
    }

    //! Atomically decrement reference count and returns its new value.
    /** Has release semantics. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Define recommended static forms via import from base class.
    using task_base::spawn;

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn root tasks on list and wait for all of them to finish.
    /** If there are more tasks than worker threads, the tasks are spawned in
        order of front to back. */
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

    //! Enqueue task for starvation-resistant execution.
#if __TBB_TASK_PRIORITY
    /** The task will be enqueued at the normal priority level regardless of the
        priority of its task group.

        The rationale for such semantics is that the priority of an enqueued task is
        statically fixed at the moment of its enqueuing, while task group priority
        is dynamic. Thus automatic priority inheritance would generally be subject
        to a race, which may result in unexpected behavior.

        Use the enqueue() overload with an explicit priority value and the task::group_priority()
        method to implement such priority inheritance when it is really necessary. **/
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }
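
    /* An enqueuing sketch; BackgroundTask is an illustrative user type. Enqueued
       tasks are typically roots that outlive the scope that created them.

           tbb::task& t = *new( tbb::task::allocate_root() ) BackgroundTask;
           tbb::task::enqueue(t);               // processed in roughly FIFO order
    */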

#if __TBB_TASK_PRIORITY
    //! Enqueue task for starvation-resistant execution on the specified priority level.
    static void enqueue( task& t, priority_t p ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high
                     || p == internal::priority_critical, "Invalid priority level value");
#else
        __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
#endif
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */

    //! Enqueue task in task_arena
    //! The implementation is in task_arena.h
    inline static void enqueue( task& t, task_arena& arena
#if __TBB_TASK_PRIORITY
        , priority_t p = priority_t(0)
#endif
        );

    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    //! task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

    //! sets parent task pointer to specified value
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
        prefix().parent = p;
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! This method is deprecated and will be removed in the future.
    /** Use method group() instead. **/
    task_group_context* context() {return prefix().context;}

    //! Pointer to the task group descriptor.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! True if task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be integral type. Value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by scheduler to notify task that it ran on an unexpected thread.
    /** Invoked before method execute() runs, if the task is stolen, or if the task has
        affinity but will be executed on another thread.

        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
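
    /* An affinity-replay sketch: record where a task actually ran via a note_affinity()
       override, then reuse the id to hint placement of a related task. MyTask and the
       recorded_id variable are illustrative.

           struct MyTask: tbb::task {
               tbb::task::affinity_id* where;   // points to caller-owned storage
               void note_affinity( affinity_id id ) __TBB_override { *where = id; }
               tbb::task* execute() __TBB_override { return NULL; }
           };

           // Later, for a task that should run on the same thread if possible:
           next_task.set_affinity(recorded_id);
    */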

#if __TBB_TASK_GROUP_CONTEXT
    //! Moves this task from its current group into another one.
    /** Argument ctx specifies the new group.

        The primary purpose of this method is to associate a unique task group context
        with a task allocated for subsequent enqueuing. In contrast to spawned tasks,
        enqueued ones normally outlive the scope where they were created. This makes
        the traditional usage model, where task group contexts are allocated locally on
        the stack, inapplicable. Dynamic allocation of context objects is inefficient.
        Method change_group() allows a task group context object to be made a member
        of the task class, and then associated with its containing task object in the
        latter's constructor. **/
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
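
    /* A sketch of the pattern described above: the context is a member of the task and is
       attached in the task's constructor. LongLivedTask is an illustrative user type.

           class LongLivedTask: public tbb::task {
               tbb::task_group_context my_ctx;
           public:
               LongLivedTask() { change_group(my_ctx); }
               tbb::task* execute() __TBB_override { return NULL; }
           };
    */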

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise. **/
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if the context has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#else
    bool is_cancelled () const { return false; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group this task belongs to.
    void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }

    //! Retrieves current priority of the task group this task belongs to.
    priority_t group_priority () const { return prefix().context->priority(); }

#endif /* __TBB_TASK_PRIORITY */

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to corresponding task_prefix.
    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
#if __TBB_PREVIEW_CRITICAL_TASKS
    friend void internal::make_critical( task& );
    friend bool internal::is_critical( task& );
#endif
}; // class task

#if __TBB_PREVIEW_CRITICAL_TASKS
namespace internal {
inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); }
} // namespace internal
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */

//! task that does nothing. Useful for synchronization.
/** @ingroup task_scheduling */
class empty_task: public task {
    task* execute() __TBB_override {
        return NULL;
    }
};
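
/* A common pattern for empty_task: a dummy root used only to wait for dynamically
   spawned children. MyTask is an illustrative user type.

       tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
       root.set_ref_count(2);                   // 1 child + 1 for wait_for_all
       root.spawn( *new( root.allocate_child() ) MyTask );
       root.wait_for_all();                     // returns when the child completes
       tbb::task::destroy(root);
*/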

//! @cond INTERNAL
namespace internal {
    template<typename F>
    class function_task : public task {
#if __TBB_ALLOW_MUTABLE_FUNCTORS
        F my_func;
#else
        const F my_func;
#endif
        task* execute() __TBB_override {
            my_func();
            return NULL;
        }
    public:
        function_task( const F& f ) : my_func(f) {}
#if __TBB_CPP11_RVALUE_REF_PRESENT
        function_task( F&& f ) : my_func( std::move(f) ) {}
#endif
    };
} // namespace internal
//! @endcond

//! A list of children.
/** Used for methods that schedule multiple tasks, such as task::spawn(task_list&)
    and task::spawn_root_and_wait(task_list&).
    @ingroup task_scheduling */
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    //! Construct empty list
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects.
    ~task_list() {}

    //! True if list is empty; false otherwise.
    bool empty() const {return !first;}

    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }
#if __TBB_TODO
    // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
    //! Push task onto front of list (FIFO local execution, like individual spawning in the same order).
    void push_front( task& task ) {
        if( empty() ) {
            push_back(task);
        } else {
            task.prefix().next = first;
            first = &task;
        }
    }
#endif
    //! Pop the front task from the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};
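
/* A task_list sketch: collect several root tasks and run them all; MyRootTask is an
   illustrative user type.

       tbb::task_list roots;
       for( int i = 0; i < 4; ++i )
           roots.push_back( *new( tbb::task::allocate_root() ) MyRootTask(i) );
       tbb::task::spawn_root_and_wait(roots);   // waits for completion and clears the list
*/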

inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}

inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */