/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"

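// Range-check macros for reference type, phase and sub-phase arguments.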
#define ASSERT_REF_TYPE(ref_type) assert((ref_type) >= REF_SOFT && (ref_type) <= REF_PHANTOM, \
                                         "Invariant (%d)", (int)ref_type)

#define ASSERT_PHASE(phase) assert((phase) >= ReferenceProcessor::RefPhase1 && \
                                   (phase) < ReferenceProcessor::RefPhaseMax, \
                                   "Invariant (%d)", (int)phase);

#define ASSERT_SUB_PHASE(phase) assert((phase) >= ReferenceProcessor::SoftRefSubPhase1 && \
                                       (phase) < ReferenceProcessor::RefSubPhaseMax, \
                                       "Invariant (%d)", (int)phase);

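// Titles for the per-worker (parallel) and serial timing log lines of each
// reference processing sub-phase, indexed by ReferenceProcessor::RefProcSubPhases.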
static const char* SubPhasesParWorkTitle[ReferenceProcessor::RefSubPhaseMax] = {
  "SoftRef (ms):",
  "SoftRef (ms):",
  "WeakRef (ms):",
  "FinalRef (ms):",
  "FinalRef (ms):",
  "PhantomRef (ms):"
};

static const char* Phase2ParWorkTitle = "Total (ms):";

static const char* SubPhasesSerWorkTitle[ReferenceProcessor::RefSubPhaseMax] = {
  "SoftRef:",
  "SoftRef:",
  "WeakRef:",
  "FinalRef:",
  "FinalRef:",
  "PhantomRef:"
};

static const char* Phase2SerWorkTitle = "Total:";

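// Indentation prefixes for nested log output; entry i is used for indent level i.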
static const char* Indents[6] = {"", "  ", "    ", "      ", "        ", "          "};

static const char* PhaseNames[ReferenceProcessor::RefPhaseMax] = {
  "Reconsider SoftReferences",
  "Notify Soft/WeakReferences",
  "Notify and keep alive finalizable",
  "Notify PhantomReferences"
};

static const char* ReferenceTypeNames[REF_PHANTOM + 1] = {
  "None", "Other", "SoftReference", "WeakReference", "FinalReference", "PhantomReference"
};

STATIC_ASSERT((REF_PHANTOM + 1) == ARRAY_SIZE(ReferenceTypeNames));

static const char* phase_enum_2_phase_string(ReferenceProcessor::RefProcPhases phase) {
  ASSERT_PHASE(phase);
  return PhaseNames[phase];
}

static const char* ref_type_2_string(ReferenceType ref_type) {
  ASSERT_REF_TYPE(ref_type);
  return ReferenceTypeNames[ref_type];
}

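// RAII tracker for a single worker thread: records the start time on
// construction and, on destruction, stores the elapsed time (in seconds)
// in the given worker's slot of the WorkerDataArray.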
RefProcWorkerTimeTracker::RefProcWorkerTimeTracker(WorkerDataArray<double>* worker_time, uint worker_id) :
  _worker_time(worker_time), _start_time(os::elapsedTime()), _worker_id(worker_id) {
  assert(worker_time != NULL, "Invariant");
}

RefProcWorkerTimeTracker::~RefProcWorkerTimeTracker() {
  double result = os::elapsedTime() - _start_time;
  _worker_time->set(_worker_id, result);
}

RefProcSubPhasesWorkerTimeTracker::RefProcSubPhasesWorkerTimeTracker(ReferenceProcessor::RefProcSubPhases phase,
                                                                     ReferenceProcessorPhaseTimes* phase_times,
                                                                     uint worker_id) :
  RefProcWorkerTimeTracker(phase_times->sub_phase_worker_time_sec(phase), worker_id) {
}

RefProcSubPhasesWorkerTimeTracker::~RefProcSubPhasesWorkerTimeTracker() {
}

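// Base class for the phase trackers below: stamps the start time and reports
// the phase start to the GCTimer (if any) on construction, and reports the
// phase end on destruction. elapsed_time() returns the phase duration in ms.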
RefProcPhaseTimeBaseTracker::RefProcPhaseTimeBaseTracker(const char* title,
                                                         ReferenceProcessor::RefProcPhases phase_number,
                                                         ReferenceProcessorPhaseTimes* phase_times) :
  _phase_times(phase_times), _start_ticks(), _end_ticks(), _phase_number(phase_number) {
  assert(_phase_times != NULL, "Invariant");

  _start_ticks.stamp();
  if (_phase_times->gc_timer() != NULL) {
    _phase_times->gc_timer()->register_gc_phase_start(title, _start_ticks);
  }
}

Ticks RefProcPhaseTimeBaseTracker::end_ticks() {
  // If ASSERT is defined, the default value of Ticks will be -2.
  if (_end_ticks.value() <= 0) {
    _end_ticks.stamp();
  }

  return _end_ticks;
}

double RefProcPhaseTimeBaseTracker::elapsed_time() {
  jlong end_value = end_ticks().value();

  return TimeHelper::counter_to_millis(end_value - _start_ticks.value());
}

RefProcPhaseTimeBaseTracker::~RefProcPhaseTimeBaseTracker() {
  if (_phase_times->gc_timer() != NULL) {
    Ticks ticks = end_ticks();
    _phase_times->gc_timer()->register_gc_phase_end(ticks);
  }
}

RefProcBalanceQueuesTimeTracker::RefProcBalanceQueuesTimeTracker(ReferenceProcessor::RefProcPhases phase_number,
                                                                 ReferenceProcessorPhaseTimes* phase_times) :
  RefProcPhaseTimeBaseTracker("Balance queues", phase_number, phase_times) {}

RefProcBalanceQueuesTimeTracker::~RefProcBalanceQueuesTimeTracker() {
  double elapsed = elapsed_time();
  phase_times()->set_balance_queues_time_ms(_phase_number, elapsed);
}

RefProcPhaseTimeTracker::RefProcPhaseTimeTracker(ReferenceProcessor::RefProcPhases phase_number,
                                                 ReferenceProcessorPhaseTimes* phase_times) :
  RefProcPhaseTimeBaseTracker(phase_enum_2_phase_string(phase_number), phase_number, phase_times) {
}

RefProcPhaseTimeTracker::~RefProcPhaseTimeTracker() {
  double elapsed = elapsed_time();
  phase_times()->set_phase_time_ms(_phase_number, elapsed);
}

RefProcTotalPhaseTimesTracker::RefProcTotalPhaseTimesTracker(ReferenceProcessor::RefProcPhases phase_number,
                                                             ReferenceProcessorPhaseTimes* phase_times,
                                                             ReferenceProcessor* rp) :
  RefProcPhaseTimeBaseTracker(phase_enum_2_phase_string(phase_number), phase_number, phase_times), _rp(rp) {
}

RefProcTotalPhaseTimesTracker::~RefProcTotalPhaseTimesTracker() {
  double elapsed = elapsed_time();
  phase_times()->set_phase_time_ms(_phase_number, elapsed);
}
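
// Usage sketch (illustrative only, not code from this file): a collector is
// expected to scope one of these trackers around the work of a phase, e.g.
//
//   {
//     RefProcTotalPhaseTimesTracker tt(ReferenceProcessor::RefPhase2, phase_times, rp);
//     // ... process Soft/Weak/Final references ...
//   } // destructor records the elapsed time for RefPhase2
//
// where phase_times and rp are the caller's ReferenceProcessorPhaseTimes and
// ReferenceProcessor.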

ReferenceProcessorPhaseTimes::ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads) :
  _processing_is_mt(false), _gc_timer(gc_timer) {

  for (uint i = 0; i < ReferenceProcessor::RefSubPhaseMax; i++) {
    _sub_phases_worker_time_sec[i] = new WorkerDataArray<double>(max_gc_threads, SubPhasesParWorkTitle[i]);
  }
  _phase2_worker_time_sec = new WorkerDataArray<double>(max_gc_threads, Phase2ParWorkTitle);

  reset();
}

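// Maps REF_SOFT..REF_PHANTOM to zero-based indices into the per-reference-type
// counter arrays.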
inline int ref_type_2_index(ReferenceType ref_type) {
  return ref_type - REF_SOFT;
}

WorkerDataArray<double>* ReferenceProcessorPhaseTimes::sub_phase_worker_time_sec(ReferenceProcessor::RefProcSubPhases sub_phase) const {
  ASSERT_SUB_PHASE(sub_phase);
  return _sub_phases_worker_time_sec[sub_phase];
}

double ReferenceProcessorPhaseTimes::phase_time_ms(ReferenceProcessor::RefProcPhases phase) const {
  ASSERT_PHASE(phase);
  return _phases_time_ms[phase];
}

void ReferenceProcessorPhaseTimes::set_phase_time_ms(ReferenceProcessor::RefProcPhases phase,
                                                     double phase_time_ms) {
  ASSERT_PHASE(phase);
  _phases_time_ms[phase] = phase_time_ms;
}

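// Resets all per-worker and aggregate times to the uninitialized sentinel,
// clears the per-reference-type counters, and marks processing as
// single-threaded.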
void ReferenceProcessorPhaseTimes::reset() {
  for (int i = 0; i < ReferenceProcessor::RefSubPhaseMax; i++) {
    _sub_phases_worker_time_sec[i]->reset();
    _sub_phases_total_time_ms[i] = uninitialized();
  }

  for (int i = 0; i < ReferenceProcessor::RefPhaseMax; i++) {
    _phases_time_ms[i] = uninitialized();
    _balance_queues_time_ms[i] = uninitialized();
  }

  _phase2_worker_time_sec->reset();

  for (int i = 0; i < number_of_subclasses_of_ref; i++) {
    _ref_cleared[i] = 0;
    _ref_discovered[i] = 0;
  }

  _total_time_ms = uninitialized();

  _processing_is_mt = false;
}

ReferenceProcessorPhaseTimes::~ReferenceProcessorPhaseTimes() {
  for (int i = 0; i < ReferenceProcessor::RefSubPhaseMax; i++) {
    delete _sub_phases_worker_time_sec[i];
  }
  delete _phase2_worker_time_sec;
}

double ReferenceProcessorPhaseTimes::sub_phase_total_time_ms(ReferenceProcessor::RefProcSubPhases sub_phase) const {
  ASSERT_SUB_PHASE(sub_phase);
  return _sub_phases_total_time_ms[sub_phase];
}

void ReferenceProcessorPhaseTimes::set_sub_phase_total_phase_time_ms(ReferenceProcessor::RefProcSubPhases sub_phase,
                                                                     double time_ms) {
  ASSERT_SUB_PHASE(sub_phase);
  _sub_phases_total_time_ms[sub_phase] = time_ms;
}

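// The cleared count is accumulated with an atomic add, so this is safe to call
// from multiple worker threads concurrently.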
void ReferenceProcessorPhaseTimes::add_ref_cleared(ReferenceType ref_type, size_t count) {
  ASSERT_REF_TYPE(ref_type);
  Atomic::add(count, &_ref_cleared[ref_type_2_index(ref_type)]);
}

void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) {
  ASSERT_REF_TYPE(ref_type);
  _ref_discovered[ref_type_2_index(ref_type)] = count;
}

double ReferenceProcessorPhaseTimes::balance_queues_time_ms(ReferenceProcessor::RefProcPhases phase) const {
  ASSERT_PHASE(phase);
  return _balance_queues_time_ms[phase];
}

void ReferenceProcessorPhaseTimes::set_balance_queues_time_ms(ReferenceProcessor::RefProcPhases phase, double time_ms) {
  ASSERT_PHASE(phase);
  _balance_queues_time_ms[phase] = time_ms;
}

#define TIME_FORMAT "%.1lfms"

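// Prints the total reference processing time (if requested), then the timing of
// each phase and the discovered/cleared counts per reference type, all at
// Debug level for gc+phases+ref.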
void ReferenceProcessorPhaseTimes::print_all_references(uint base_indent, bool print_total) const {
  if (print_total) {
    LogTarget(Debug, gc, phases, ref) lt;

    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print_cr("%s%s: " TIME_FORMAT,
                  Indents[base_indent], "Reference Processing", total_time_ms());
    }
  }

  uint next_indent = base_indent + 1;
  print_phase(ReferenceProcessor::RefPhase1, next_indent);
  print_phase(ReferenceProcessor::RefPhase2, next_indent);
  print_phase(ReferenceProcessor::RefPhase3, next_indent);
  print_phase(ReferenceProcessor::RefPhase4, next_indent);

  print_reference(REF_SOFT, next_indent);
  print_reference(REF_WEAK, next_indent);
  print_reference(REF_FINAL, next_indent);
  print_reference(REF_PHANTOM, next_indent);
}

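// Prints the discovered and cleared counts for a single reference type.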
void ReferenceProcessorPhaseTimes::print_reference(ReferenceType ref_type, uint base_indent) const {
  LogTarget(Debug, gc, phases, ref) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    ResourceMark rm;

    ls.print_cr("%s%s:", Indents[base_indent], ref_type_2_string(ref_type));

    uint const next_indent = base_indent + 1;
    int const ref_type_index = ref_type_2_index(ref_type);

    ls.print_cr("%sDiscovered: " SIZE_FORMAT, Indents[next_indent], _ref_discovered[ref_type_index]);
    ls.print_cr("%sCleared: " SIZE_FORMAT, Indents[next_indent], _ref_cleared[ref_type_index]);
  }
}

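// Prints the elapsed time of one phase and, when logging is enabled, its
// queue-balancing time (if multi-threaded) and the worker times of the
// sub-phases belonging to that phase.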
void ReferenceProcessorPhaseTimes::print_phase(ReferenceProcessor::RefProcPhases phase, uint indent) const {
  double phase_time = phase_time_ms(phase);

  if (phase_time == uninitialized()) {
    return;
  }

  LogTarget(Debug, gc, phases, ref) lt;
  LogStream ls(lt);

  ls.print_cr("%s%s%s " TIME_FORMAT,
              Indents[indent],
              phase_enum_2_phase_string(phase),
              indent == 0 ? "" : ":", /* 0 indent logs don't need colon. */
              phase_time);

  LogTarget(Debug, gc, phases, ref) lt2;
  if (lt2.is_enabled()) {
    LogStream ls(lt2);

    if (_processing_is_mt) {
      print_balance_time(&ls, phase, indent + 1);
    }

    switch (phase) {
      case ReferenceProcessor::RefPhase1:
        print_sub_phase(&ls, ReferenceProcessor::SoftRefSubPhase1, indent + 1);
        break;
      case ReferenceProcessor::RefPhase2:
        print_sub_phase(&ls, ReferenceProcessor::SoftRefSubPhase2, indent + 1);
        print_sub_phase(&ls, ReferenceProcessor::WeakRefSubPhase2, indent + 1);
        print_sub_phase(&ls, ReferenceProcessor::FinalRefSubPhase2, indent + 1);
        break;
      case ReferenceProcessor::RefPhase3:
        print_sub_phase(&ls, ReferenceProcessor::FinalRefSubPhase3, indent + 1);
        break;
      case ReferenceProcessor::RefPhase4:
        print_sub_phase(&ls, ReferenceProcessor::PhantomRefSubPhase4, indent + 1);
        break;
      default:
        ShouldNotReachHere();
    }
    if (phase == ReferenceProcessor::RefPhase2) {
      print_worker_time(&ls, _phase2_worker_time_sec, Phase2SerWorkTitle, indent + 1);
    }
  }
}

void ReferenceProcessorPhaseTimes::print_balance_time(LogStream* ls, ReferenceProcessor::RefProcPhases phase, uint indent) const {
  double balance_time = balance_queues_time_ms(phase);
  if (balance_time != uninitialized()) {
    ls->print_cr("%s%s " TIME_FORMAT, Indents[indent], "Balance queues:", balance_time);
  }
}

void ReferenceProcessorPhaseTimes::print_sub_phase(LogStream* ls, ReferenceProcessor::RefProcSubPhases sub_phase, uint indent) const {
  print_worker_time(ls, _sub_phases_worker_time_sec[sub_phase], SubPhasesSerWorkTitle[sub_phase], indent);
}

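// Multi-threaded processing: prints a summary of the per-worker times (and the
// individual worker times at Trace level). Serial processing: prints the single
// recorded value, or notes that the sub-phase was skipped.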
void ReferenceProcessorPhaseTimes::print_worker_time(LogStream* ls, WorkerDataArray<double>* worker_time, const char* ser_title, uint indent) const {
  ls->print("%s", Indents[indent]);
  if (_processing_is_mt) {
    worker_time->print_summary_on(ls, true);
    LogTarget(Trace, gc, phases, task) lt;
    if (lt.is_enabled()) {
      LogStream ls2(lt);
      ls2.print("%s", Indents[indent]);
      worker_time->print_details_on(&ls2);
    }
  } else {
    if (worker_time->get(0) != uninitialized()) {
      ls->print_cr("%s " TIME_FORMAT,
                   ser_title,
                   worker_time->get(0) * MILLIUNITS);
    } else {
      ls->print_cr("%s skipped", ser_title);
    }
  }
}

#undef ASSERT_REF_TYPE
#undef ASSERT_SUB_PHASE
#undef ASSERT_PHASE
#undef TIME_FORMAT