/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/* The test uses the "single producer multiple consumers" (SPMC) pattern to check
   that the memory of the tasks stolen by consumer threads is returned to the
   producer thread and reused.

   The test consists of a series of iterations, each of which executes a task tree.
   The test fails if the memory consumption does not stabilize within some
   number of iterations.

   After the memory consumption has stabilized, the memory state is perturbed by
   switching the producer thread, and the check is repeated.
*/

#define HARNESS_DEFAULT_MIN_THREADS -1

#define __TBB_COUNT_TASK_NODES 1
#include "harness_inject_scheduler.h"

#include "tbb/atomic.h"
#include "harness_assert.h"
#include <cstdlib>


// Test configuration parameters

//! Maximal number of test iterations
const int MaxIterations = 600;
//! Number of iterations during which the memory consumption must stabilize
const int AsymptoticRange = 100;
//! Number of times the memory state is perturbed to repeat the check
const int NumProducerSwitches = 2;
//! Number of iterations after which the success of a producer switch is checked
const int ProducerCheckTimeout = 10;
//! Number of initial iterations used to collect statistics for later checks
const int InitialStatsIterations = 20;
//! Inner iterations of RunTaskGenerators()
const int TaskGeneratorsIterations = TBB_USE_DEBUG ? 30 : 100;

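// Globals shared by the producer and the consumer threads.
// Exchanger passes a produced task from the producer thread to a stealing consumer;
// Producer points to the scheduler of the thread currently acting as the producer.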
tbb::atomic<int> Count;
tbb::atomic<tbb::task*> Exchanger;
tbb::internal::scheduler* Producer;

#include "tbb/task_scheduler_init.h"

#include "harness.h"

using namespace tbb;
using namespace tbb::internal;

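//! Task that transfers the producer role to the thread that steals and executes it.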
class ChangeProducer: public tbb::task {
public:
    tbb::task* execute() __TBB_override {
        if( is_stolen_task() ) {
            Producer = internal::governor::local_scheduler();
        }
        return NULL;
    }
};

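//! Recursively builds a tree of tasks. The producer thread additionally publishes
//! tasks via Exchanger, while the other threads consume and spawn whatever they find there.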
class TaskGenerator: public tbb::task {
    const int my_child_count;
    int my_depth;
public:
    TaskGenerator(int child_count, int d) : my_child_count(child_count), my_depth(d) {
        ASSERT(my_child_count>1, "The TaskGenerator should produce at least two children");
    }
    tbb::task* execute() __TBB_override {
        if( my_depth>0 ) {
            int child_count = my_child_count;
            scheduler* my_sched = internal::governor::local_scheduler();
            tbb::task& c = *new( allocate_continuation() ) tbb::empty_task;
            c.set_ref_count( child_count );
            recycle_as_child_of(c);
            --child_count;
            if( Producer==my_sched ) {
                // produce a task and put it into Exchanger
                tbb::task* t = new( c.allocate_child() ) tbb::empty_task;
                --child_count;
                t = Exchanger.fetch_and_store(t);
                if( t ) spawn(*t);
            } else {
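                // take a task from Exchanger, if any, and spawn it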
                tbb::task* t = Exchanger.fetch_and_store(NULL);
                if( t ) spawn(*t);
            }
            while( child_count ) {
                tbb::task* t = new( c.allocate_child() ) TaskGenerator(my_child_count, my_depth-1);
                if( my_depth>4 ) enqueue(*t);
                else spawn(*t);
                --child_count;
            }
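            // The task was recycled as a child of c above; returning it makes the
            // scheduler execute it again immediately at the reduced depth.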
            --my_depth;
            return this;
        } else {
            tbb::task* t = Exchanger.fetch_and_store(NULL);
            if( t ) spawn(*t);
            return NULL;
        }
    }
};

#include "harness_memory.h"
#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
// VS2008/VC9 seems to have an issue
#pragma warning( push )
#pragma warning( disable: 4985 )
#endif
#include <math.h>
#if _MSC_VER==1500 && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif

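//! Runs a batch of TaskGenerator trees, optionally forcing a producer re-election
//! and warning if the producer has not changed when checkProducer is set.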
void RunTaskGenerators( bool switchProducer = false, bool checkProducer = false ) {
    if( switchProducer )
        Producer = NULL;
    tbb::task* dummy_root = new( tbb::task::allocate_root() ) tbb::empty_task;
    dummy_root->set_ref_count( 2 );
    // If no producer, start elections; some worker will take the role
    if( Producer )
        tbb::task::spawn( *new( dummy_root->allocate_child() ) tbb::empty_task );
    else
        tbb::task::spawn( *new( dummy_root->allocate_child() ) ChangeProducer );
    if( checkProducer && !Producer )
        REPORT("Warning: producer has not changed after 10 attempts; running on a single core?\n");
    for( int j=0; j<TaskGeneratorsIterations; ++j ) {
        if( j&1 ) {
            tbb::task& t = *new( tbb::task::allocate_root() ) TaskGenerator(/*child_count=*/4, /*depth=*/6);
            tbb::task::spawn_root_and_wait(t);
        } else {
            tbb::task& t = *new (tbb::task::allocate_additional_child_of(*dummy_root))
                                TaskGenerator(/*child_count=*/4, /*depth=*/6);
            tbb::task::enqueue(t);
        }
    }
    dummy_root->wait_for_all();
    tbb::task::destroy( *dummy_root );
}

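//! Task that spawns a list of empty children via tbb::task_list and waits for them.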
class TaskList: public tbb::task {
    const int my_num_childs;
public:
    TaskList(const int num_childs) : my_num_childs(num_childs) {}
    tbb::task* execute() __TBB_override {
        tbb::task_list list;
        for (int i=0; i<my_num_childs; ++i)
        {
            list.push_back( *new( allocate_child() ) tbb::empty_task );
        }
        set_ref_count(my_num_childs+1);
        spawn(list);

        wait_for_all();
        return 0;
    }
};

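//! Runs TaskList roots with a geometrically growing (x3) number of children.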
void RunTaskListGenerator()
{
    const int max_num_childs = 10000;
    int num_childs = 3;

    while ( num_childs < max_num_childs )
    {
        tbb::task& root = *new( tbb::task::allocate_root() ) TaskList(num_childs);

        tbb::task::spawn_root_and_wait(root);

        num_childs = 3 * num_childs;
    }
}

//! Tests whether the task scheduler allows thieves to hoard task objects.
/** The test takes a while to run, so we run it only with the default
    number of threads. */
void TestTaskReclamation() {
    REMARK("testing task reclamation\n");

    size_t initial_amount_of_memory = 0;
    double task_count_sum = 0;
    double task_count_sum_square = 0;
    double average, sigma;

    tbb::task_scheduler_init init (MinThread);
    REMARK("Starting with %d threads\n", MinThread);
    // For now, the master thread produces "additional" tasks; later a worker will take over this role.
    Producer = internal::governor::local_scheduler();
    int N = InitialStatsIterations;
    // First N iterations fill internal buffers and collect initial statistics
    for( int i=0; i<N; ++i ) {
        RunTaskGenerators();
        RunTaskListGenerator();

        size_t m = GetMemoryUsage();
        if( m > initial_amount_of_memory )
            initial_amount_of_memory = m;

        intptr_t n = internal::governor::local_scheduler()->get_task_node_count( /*count_arena_workers=*/true );
        task_count_sum += n;
        task_count_sum_square += n*n;

        REMARK( "Consumed %ld bytes and %ld objects (iteration=%d)\n", long(m), long(n), i );
    }
    // Calculate statistical values
    average = task_count_sum / N;
    sigma = sqrt( (task_count_sum_square - task_count_sum*task_count_sum/N)/N );
    REMARK("Average task count: %g, sigma: %g, sum: %g, square sum: %g\n", average, sigma, task_count_sum, task_count_sum_square);

    int last_error_iteration = 0,
        producer_switch_iteration = 0,
        producer_switches = 0;
    bool switchProducer = false,
         checkProducer = false;
    for( int i=0; i < MaxIterations; ++i ) {
        // These iterations check for excessive memory use and unreasonable task count
        RunTaskGenerators( switchProducer, checkProducer );
        RunTaskListGenerator();

        intptr_t n = internal::governor::local_scheduler()->get_task_node_count( /*count_arena_workers=*/true );
        size_t m = GetMemoryUsage();

        if( (m > initial_amount_of_memory) && (n > average+4*sigma) ) {
            // Use 4*sigma interval (for a normal distribution, 3*sigma contains ~99.7% of values).
            REMARK( "Warning: possible leak of up to %lu bytes; currently %ld cached task objects (iteration=%d)\n",
                    static_cast<unsigned long>(m-initial_amount_of_memory), long(n), i );
            last_error_iteration = i;
            initial_amount_of_memory = m;
        } else {
            REMARK( "Consumed %ld bytes and %ld objects (iteration=%d)\n", long(m), long(n), i );
        }
        if ( i == last_error_iteration + AsymptoticRange ) {
            if ( producer_switches++ == NumProducerSwitches )
                break;
            else {
                last_error_iteration = producer_switch_iteration = i;
                switchProducer = true;
            }
        }
        else {
            switchProducer = false;
            checkProducer = producer_switch_iteration && (i == producer_switch_iteration + ProducerCheckTimeout);
        }
    }
    ASSERT( last_error_iteration < MaxIterations - AsymptoticRange, "The number of allocated task objects keeps growing; a leak is possible." );
}

int TestMain () {
    if( !GetMemoryUsage() ) {
        REMARK("GetMemoryUsage is not implemented for this platform\n");
        return Harness::Skipped;
    }
    TestTaskReclamation();
    return Harness::Done;
}