/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24 | |
25 | #include "precompiled.hpp" |
26 | #include "gc/shared/taskqueue.hpp" |
27 | #include "gc/shared/owstTaskTerminator.hpp" |
28 | #include "oops/oop.inline.hpp" |
29 | #include "logging/log.hpp" |
30 | #include "runtime/atomic.hpp" |
31 | #include "runtime/os.hpp" |
32 | #include "runtime/thread.inline.hpp" |
33 | #include "utilities/debug.hpp" |
34 | #include "utilities/stack.inline.hpp" |
35 | |
#ifdef TRACESPINNING
// Global diagnostic counters, accumulated across all terminator instances
// and reported by print_termination_counts().  Updated without atomics in
// offer_termination(), so the totals are approximate under contention.
uint ParallelTaskTerminator::_total_yields = 0;
uint ParallelTaskTerminator::_total_spins = 0;
uint ParallelTaskTerminator::_total_peeks = 0;
#endif
41 | |
42 | #if TASKQUEUE_STATS |
// Column labels for the statistics table printed by print()/print_header().
// Presumably listed in StatId declaration order (push, pop, pop_slow,
// steal_attempt, steal, overflow, overflow_max_len) -- confirm against the
// StatId enum in the header.
const char * const TaskQueueStats::_names[last_stat_id] = {
  "qpush", "qpop", "qpop-s", "qattempt", "qsteal", "opush", "omax"
};
46 | |
47 | TaskQueueStats & TaskQueueStats::operator +=(const TaskQueueStats & addend) |
48 | { |
49 | for (unsigned int i = 0; i < last_stat_id; ++i) { |
50 | _stats[i] += addend._stats[i]; |
51 | } |
52 | return *this; |
53 | } |
54 | |
55 | void TaskQueueStats::print_header(unsigned int line, outputStream* const stream, |
56 | unsigned int width) |
57 | { |
58 | // Use a width w: 1 <= w <= max_width |
59 | const unsigned int max_width = 40; |
60 | const unsigned int w = MAX2(MIN2(width, max_width), 1U); |
61 | |
62 | if (line == 0) { // spaces equal in width to the header |
63 | const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1; |
64 | stream->print("%*s" , hdr_width, " " ); |
65 | } else if (line == 1) { // labels |
66 | stream->print("%*s" , w, _names[0]); |
67 | for (unsigned int i = 1; i < last_stat_id; ++i) { |
68 | stream->print(" %*s" , w, _names[i]); |
69 | } |
70 | } else if (line == 2) { // dashed lines |
71 | char dashes[max_width + 1]; |
72 | memset(dashes, '-', w); |
73 | dashes[w] = '\0'; |
74 | stream->print("%s" , dashes); |
75 | for (unsigned int i = 1; i < last_stat_id; ++i) { |
76 | stream->print(" %s" , dashes); |
77 | } |
78 | } |
79 | } |
80 | |
81 | void TaskQueueStats::print(outputStream* stream, unsigned int width) const |
82 | { |
83 | #define FMT SIZE_FORMAT_W(*) |
84 | stream->print(FMT, width, _stats[0]); |
85 | for (unsigned int i = 1; i < last_stat_id; ++i) { |
86 | stream->print(" " FMT, width, _stats[i]); |
87 | } |
88 | #undef FMT |
89 | } |
90 | |
91 | #ifdef ASSERT |
// Invariants which should hold after a TaskQueue has been emptied and is
// quiescent; they do not hold at arbitrary times.
void TaskQueueStats::verify() const
{
  // Every pushed task must have been either popped locally or stolen.
  assert(get(push) == get(pop) + get(steal),
         "push=" SIZE_FORMAT " pop=" SIZE_FORMAT " steal=" SIZE_FORMAT,
         get(push), get(pop), get(steal));
  // Slow-path pops are a subset of all pops.
  assert(get(pop_slow) <= get(pop),
         "pop_slow=" SIZE_FORMAT " pop=" SIZE_FORMAT,
         get(pop_slow), get(pop));
  // A steal can only succeed as part of a steal attempt.
  assert(get(steal) <= get(steal_attempt),
         "steal=" SIZE_FORMAT " steal_attempt=" SIZE_FORMAT,
         get(steal), get(steal_attempt));
  // Overflow implies at least one push occurred.
  assert(get(overflow) == 0 || get(push) != 0,
         "overflow=" SIZE_FORMAT " push=" SIZE_FORMAT,
         get(overflow), get(push));
  // A recorded maximum overflow length implies at least one overflow.
  assert(get(overflow_max_len) == 0 || get(overflow) != 0,
         "overflow_max_len=" SIZE_FORMAT " overflow=" SIZE_FORMAT,
         get(overflow_max_len), get(overflow));
}
112 | #endif // ASSERT |
113 | #endif // TASKQUEUE_STATS |
114 | |
// n_threads: number of worker threads that must offer termination before
//            the group as a whole terminates.
// queue_set: the task queues that are peeked for remaining work while
//            threads wait for termination.
ParallelTaskTerminator::
ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0) {}
120 | |
ParallelTaskTerminator::~ParallelTaskTerminator() {
  // If any thread ever offered termination, the queues must be empty and
  // termination must have completed (all threads offered) -- a partial
  // count here would mean the terminator is destroyed while still in use.
  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted");
}
125 | |
// Returns true if any queue in the associated queue set still holds work.
bool ParallelTaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}
129 | |
// Give up the processor briefly while waiting for termination; uses the
// raw OS yield (no VM state transition).
void ParallelTaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}
134 | |
// Sleep the current thread for 'millis' milliseconds.  The trailing
// 'false' argument presumably selects a non-interruptible sleep --
// confirm against the os::sleep() declaration.
void ParallelTaskTerminator::sleep(uint millis) {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::sleep(Thread::current(), millis, false);
}
139 | |
// Offer this thread's termination and wait for the rest of the group.
// Returns true once all _n_threads threads have offered termination (the
// queue set is then empty).  If more work appears in the queue set, or
// the optional 'terminator' requests an early exit, the offer is
// withdrawn via complete_or_exit_termination(), which returns false in
// that case.  While waiting, a thread cycles through hard spins, yields
// and finally sleeps, trading wakeup latency against CPU consumption.
bool
ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  Atomic::inc(&_offered_termination);

  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number
  // of spins and increase number with each turn at spinning until
  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
  // Then do a yield() call and start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    assert(_offered_termination <= _n_threads, "Invariant");
    // Are all threads offering termination?
    if (_offered_termination == _n_threads) {
      assert(!peek_in_queue_set(), "Precondition");
      return true;
    } else {
      // Look for more work.
      // Periodically sleep() instead of yield() to give threads
      // waiting on the cores the chance to grab this code
      if (yield_count <= WorkStealingYieldsBeforeSleep) {
        // Do a yield or hardspin.  For purposes of deciding whether
        // to sleep, count this as a yield.
        yield_count++;

        // Periodically call yield() instead spinning
        // After WorkStealingSpinToYieldRatio spins, do a yield() call
        // and reset the counts and starting limit.
        if (hard_spin_count > WorkStealingSpinToYieldRatio) {
          yield();
          hard_spin_count = 0;
          hard_spin_limit = hard_spin_start;
#ifdef TRACESPINNING
          _total_yields++;
#endif
        } else {
          // Hard spin this time
          // Increase the hard spinning period but only up to a limit.
          hard_spin_limit = MIN2(2*hard_spin_limit,
                                 (uint) WorkStealingHardSpins);
          for (uint j = 0; j < hard_spin_limit; j++) {
            SpinPause();
          }
          hard_spin_count++;
#ifdef TRACESPINNING
          _total_spins++;
#endif
        }
      } else {
        log_develop_trace(gc, task)("ParallelTaskTerminator::offer_termination() thread " PTR_FORMAT " sleeps after %u yields",
                                    p2i(Thread::current()), yield_count);
        yield_count = 0;
        // A sleep will cause this processor to seek work on another processor's
        // runqueue, if it has nothing else to run (as opposed to the yield
        // which may only move the thread to the end of the this processor's
        // runqueue).
        sleep(WorkStealingSleepMillis);
      }

#ifdef TRACESPINNING
      _total_peeks++;
#endif
      // Work showed up, or the caller-supplied terminator asked us to
      // bail out: try to withdraw this thread's termination offer.
      if (peek_in_queue_set() ||
          (terminator != NULL && terminator->should_exit_termination())) {
        return complete_or_exit_termination();
      }
    }
  }
}
225 | |
#ifdef TRACESPINNING
// Log the global spin/yield/peek counters accumulated by
// offer_termination() across all terminator instances.
void ParallelTaskTerminator::print_termination_counts() {
  log_trace(gc, task)("ParallelTaskTerminator Total yields: %u"
    " Total spins: %u Total peeks: %u",
    total_yields(),
    total_spins(),
    total_peeks());
}
#endif
235 | |
// Called from offer_termination() when a waiting thread saw more work (or
// was asked to exit): atomically retract this thread's termination offer,
// unless all threads have already offered, in which case termination must
// stand.  Returns true if terminated, false if the offer was retracted.
bool ParallelTaskTerminator::complete_or_exit_termination() {
  // If termination is ever reached, terminator should stay in such state,
  // so that all threads see the same state
  uint current_offered = _offered_termination;
  uint expected_value;
  do {
    if (current_offered == _n_threads) {
      // Every thread (including this one) has offered: termination is
      // final and the queues must be empty.
      assert(!peek_in_queue_set(), "Precondition");
      return true;
    }
    expected_value = current_offered;
    // Try to decrement the offer count.  Atomic::cmpxchg returns the old
    // value; a return equal to expected_value means the CAS succeeded,
    // otherwise retry with the freshly observed value.
  } while ((current_offered = Atomic::cmpxchg(current_offered - 1, &_offered_termination, current_offered)) != expected_value);

  assert(_offered_termination < _n_threads, "Invariant");
  return false;
}
252 | |
253 | void ParallelTaskTerminator::reset_for_reuse() { |
254 | if (_offered_termination != 0) { |
255 | assert(_offered_termination == _n_threads, |
256 | "Terminator may still be in use" ); |
257 | _offered_termination = 0; |
258 | } |
259 | } |
260 | |
#ifdef ASSERT
// A task is valid iff it refers to a non-NULL object that really is an
// object array and its index lies within the array's bounds.
bool ObjArrayTask::is_valid() const {
  return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
    _index < objArrayOop(_obj)->length();
}
#endif // ASSERT
267 | |
// Reset for reuse with a possibly different number of threads.  The plain
// reset must run first: it asserts _offered_termination against the OLD
// _n_threads, so the thread count may only be updated afterwards.
void ParallelTaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}
272 | |
// Facade that picks the termination protocol implementation: the OWST
// variant when UseOWSTTaskTerminator is set, otherwise the classic
// spinning ParallelTaskTerminator.  The chosen object is heap-allocated
// and owned by this TaskTerminator (released in the destructor).
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _terminator(UseOWSTTaskTerminator ? new OWSTTaskTerminator(n_threads, queue_set)
                                    : new ParallelTaskTerminator(n_threads, queue_set)) {
}
277 | |
278 | TaskTerminator::~TaskTerminator() { |
279 | if (_terminator != NULL) { |
280 | delete _terminator; |
281 | } |
282 | } |
283 | |
284 | |