/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_group_H
#define __TBB_task_group_H

#include "task.h"
#include "tbb_exception.h"
#include "internal/_template_helpers.h"

#if __TBB_TASK_GROUP_CONTEXT

namespace tbb {

namespace internal {
    template<typename F> class task_handle_task;
}

class task_group;
class structured_task_group;

template<typename F>
class task_handle : internal::no_assign {
    template<typename _F> friend class internal::task_handle_task;
    friend class task_group;
    friend class structured_task_group;

    static const intptr_t scheduled = 0x1;

    F my_func;
    intptr_t my_state;

    void mark_scheduled () {
        // The check here is intentionally lax to avoid the overhead of an interlocked operation
        if ( my_state & scheduled )
            internal::throw_exception( internal::eid_invalid_multiple_scheduling );
        my_state |= scheduled;
    }
public:
    task_handle( const F& f ) : my_func(f), my_state(0) {}
#if __TBB_CPP11_RVALUE_REF_PRESENT
    task_handle( F&& f ) : my_func( std::move(f)), my_state(0) {}
#endif

    void operator() () const { my_func(); }
};

enum task_group_status {
    not_complete,
    complete,
    canceled
};

namespace internal {

template<typename F>
class task_handle_task : public task {
    task_handle<F>& my_handle;
    task* execute() __TBB_override {
        my_handle();
        return NULL;
    }
public:
    task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
};

class task_group_base : internal::no_copy {
    class ref_count_guard : internal::no_copy {
        task& my_task;
    public:
        ref_count_guard(task& t) : my_task(t) {
            my_task.increment_ref_count();
        }
        ~ref_count_guard() {
            my_task.decrement_ref_count();
        }
    };
protected:
    empty_task* my_root;
    task_group_context my_context;

    task& owner () { return *my_root; }

    template<typename F>
    task_group_status internal_run_and_wait( F& f ) {
        __TBB_TRY {
            if ( !my_context.is_group_execution_cancelled() ) {
                // We need to increase the reference count of the root task to notify waiters that
                // this task group has some work in progress.
                ref_count_guard guard(*my_root);
                f();
            }
        } __TBB_CATCH( ... ) {
            my_context.register_pending_exception();
        }
        return wait();
    }

    template<typename Task, typename F>
    void internal_run( __TBB_FORWARDING_REF(F) f ) {
        owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task( internal::forward<F>(f) ));
    }

public:
    task_group_base( uintptr_t traits = 0 )
        : my_context(task_group_context::bound, task_group_context::default_traits | traits)
    {
        my_root = new( task::allocate_root(my_context) ) empty_task;
        my_root->set_ref_count(1);
    }

    ~task_group_base() __TBB_NOEXCEPT(false) {
        if( my_root->ref_count() > 1 ) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // Always attempt proper cleanup here to avoid the otherwise inevitable memory
            // corruption caused by a missing wait() (for the sake of better testability & debuggability)
            if ( !is_canceling() )
                cancel();
            __TBB_TRY {
                my_root->wait_for_all();
            } __TBB_CATCH (...) {
                task::destroy(*my_root);
                __TBB_RETHROW();
            }
            task::destroy(*my_root);
            if ( !stack_unwinding_in_progress )
                internal::throw_exception( internal::eid_missing_wait );
        }
        else {
            task::destroy(*my_root);
        }
    }

    template<typename F>
    void run( task_handle<F>& h ) {
        internal_run< internal::task_handle_task<F> >( h );
    }

    task_group_status wait() {
        __TBB_TRY {
            my_root->wait_for_all();
        } __TBB_CATCH( ... ) {
            my_context.reset();
            __TBB_RETHROW();
        }
        if ( my_context.is_group_execution_cancelled() ) {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            my_context.reset();
            return canceled;
        }
        return complete;
    }

    bool is_canceling() {
        return my_context.is_group_execution_cancelled();
    }

    void cancel() {
        my_context.cancel_group_execution();
    }
}; // class task_group_base

} // namespace internal

class task_group : public internal::task_group_base {
public:
    task_group () : task_group_base( task_group_context::concurrent_wait ) {}

#if __SUNPRO_CC
    template<typename F>
    void run( task_handle<F>& h ) {
        internal_run< internal::task_handle_task<F> >( h );
    }
#else
    using task_group_base::run;
#endif

#if __TBB_CPP11_RVALUE_REF_PRESENT
    template<typename F>
    void run( F&& f ) {
        internal_run< internal::function_task< typename internal::strip<F>::type > >( std::forward< F >(f) );
    }
#else
    template<typename F>
    void run(const F& f) {
        internal_run<internal::function_task<F> >(f);
    }
#endif

    template<typename F>
    task_group_status run_and_wait( const F& f ) {
        return internal_run_and_wait<const F>( f );
    }

    // TODO: add task_handle rvalues support
    template<typename F>
    task_group_status run_and_wait( task_handle<F>& h ) {
        h.mark_scheduled();
        return internal_run_and_wait< task_handle<F> >( h );
    }
}; // class task_group

class structured_task_group : public internal::task_group_base {
public:
    // TODO: add task_handle rvalues support
    template<typename F>
    task_group_status run_and_wait ( task_handle<F>& h ) {
        h.mark_scheduled();
        return internal_run_and_wait< task_handle<F> >( h );
    }

    task_group_status wait() {
        task_group_status res = task_group_base::wait();
        my_root->set_ref_count(1);
        return res;
    }
}; // class structured_task_group

inline
bool is_current_task_group_canceling() {
    return task::self().is_cancelled();
}

#if __TBB_CPP11_RVALUE_REF_PRESENT
template<class F>
task_handle< typename internal::strip<F>::type > make_task( F&& f ) {
    return task_handle< typename internal::strip<F>::type >( std::forward<F>(f) );
}
#else
template<class F>
task_handle<F> make_task( const F& f ) {
    return task_handle<F>( f );
}
#endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
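
// A minimal usage sketch (illustrative only; compute() is an assumed user function):
// make_task deduces the callable type, which is the convenient way to build a
// task_handle from a lambda whose exact type cannot be spelled out.
//
//     auto h = tbb::make_task( [] { compute(); } );
//     tbb::task_group g;
//     g.run( h );
//     g.wait();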

} // namespace tbb

#endif /* __TBB_TASK_GROUP_CONTEXT */

#endif /* __TBB_task_group_H */