/*
 * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcWhen.hpp"
#include "jfr/jfrEvents.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1YCTypes.hpp"
#endif
// All GC dependencies against the trace framework are contained within this file.
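
// Every sender below follows the same pattern: construct the event, check
// should_commit() so that no fields are extracted while the event is disabled
// in the current JFR recording, fill in the fields, and commit. Events
// constructed with UNTIMED have their start and end timestamps set explicitly
// from the data being reported instead of being timed around commit().

// JFR encodes memory addresses as unsigned integers, so pointers are cast
// through TraceAddress before being stored in event fields.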
typedef uintptr_t TraceAddress;

void GCTracer::send_garbage_collection_event() const {
  EventGarbageCollection event(UNTIMED);
  if (event.should_commit()) {
    event.set_gcId(GCId::current());
    event.set_name(_shared_gc_info.name());
    event.set_cause((u2) _shared_gc_info.cause());
    event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
    event.set_longestPause(_shared_gc_info.longest_pause());
    event.set_starttime(_shared_gc_info.start_timestamp());
    event.set_endtime(_shared_gc_info.end_timestamp());
    event.commit();
  }
}

void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
  EventGCReferenceStatistics e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_type((u1)type);
    e.set_count(count);
    e.commit();
  }
}

void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
                                                      const MetaspaceChunkFreeListSummary& summary) const {
  EventMetaspaceChunkFreeListSummary e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_when(when);
    e.set_metadataType(mdtype);

    e.set_specializedChunks(summary.num_specialized_chunks());
    e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes());

    e.set_smallChunks(summary.num_small_chunks());
    e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes());

    e.set_mediumChunks(summary.num_medium_chunks());
    e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes());

    e.set_humongousChunks(summary.num_humongous_chunks());
    e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes());

    e.commit();
  }
}

void ParallelOldTracer::send_parallel_old_event() const {
  EventParallelOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

void YoungGCTracer::send_young_gc_event() const {
  EventYoungGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_tenuringThreshold(_tenuring_threshold);
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}
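
// Promotion events are sent once per promoted object. Callers are expected to
// check these predicates first so the per-object bookkeeping can be skipped
// entirely when the corresponding event is disabled.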
bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
  return EventPromoteObjectInNewPLAB::is_enabled();
}

bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
  return EventPromoteObjectOutsidePLAB::is_enabled();
}

void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                     uint age, bool tenured,
                                                     size_t plab_size) const {

  EventPromoteObjectInNewPLAB event;
  if (event.should_commit()) {
    event.set_gcId(GCId::current());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.set_plabSize(plab_size);
    event.commit();
  }
}

void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                      uint age, bool tenured) const {

  EventPromoteObjectOutsidePLAB event;
  if (event.should_commit()) {
    event.set_gcId(GCId::current());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.commit();
  }
}

void OldGCTracer::send_old_gc_event() const {
  EventOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}
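
// CopyFailedInfo is shared between promotion failures (young GC) and G1
// evacuation failures; both events embed the same JFR struct.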
static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) {
  JfrStructCopyFailed failed_info;
  failed_info.set_objectCount(cf_info.failed_count());
  failed_info.set_firstSize(cf_info.first_size());
  failed_info.set_smallestSize(cf_info.smallest_size());
  failed_info.set_totalSize(cf_info.total_size());
  return failed_info;
}

void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
  EventPromotionFailed e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_promotionFailed(to_struct(pf_info));
    e.set_thread(pf_info.thread_trace_id());
    e.commit();
  }
}

// Common to CMS and G1
void OldGCTracer::send_concurrent_mode_failure_event() {
  EventConcurrentModeFailure e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.commit();
  }
}

#if INCLUDE_G1GC
void G1NewTracer::send_g1_young_gc_event() {
  EventG1GarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_type(_g1_young_gc_info.type());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}
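
// MMU (minimum mutator utilization) sample: gc_time_ms of the time_slice_ms
// window was spent in GC, against a pause target of max_time_ms.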
void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) {
  EventG1MMU e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_timeSlice(time_slice_ms);
    e.set_gcTime(gc_time_ms);
    e.set_pauseTarget(max_time_ms);
    e.commit();
  }
}

void G1NewTracer::send_evacuation_info_event(G1EvacuationInfo* info) {
  EventEvacuationInformation e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_cSetRegions(info->collectionset_regions());
    e.set_cSetUsedBefore(info->collectionset_used_before());
    e.set_cSetUsedAfter(info->collectionset_used_after());
    e.set_allocationRegions(info->allocation_regions());
    e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before());
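    // Used-after for the allocation regions is derived rather than measured:
    // what they held before the pause plus the bytes evacuated into them.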
    e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
    e.set_bytesCopied(info->bytes_copied());
    e.set_regionsFreed(info->regions_freed());
    e.commit();
  }
}

void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
  EventEvacuationFailed e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_evacuationFailed(to_struct(ef_info));
    e.commit();
  }
}
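
// G1EvacSummary reports sizes in HeapWords; they are scaled by HeapWordSize
// to the bytes the event fields carry.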
static JfrStructG1EvacuationStatistics
create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
  JfrStructG1EvacuationStatistics s;
  s.set_gcId(gcid);
  s.set_allocated(summary.allocated() * HeapWordSize);
  s.set_wasted(summary.wasted() * HeapWordSize);
  s.set_used(summary.used() * HeapWordSize);
  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
  s.set_regionsRefilled(summary.regions_filled());
  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
  s.set_failureUsed(summary.failure_used() * HeapWordSize);
  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
  return s;
}

void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
  EventG1EvacuationYoungStatistics surv_evt;
  if (surv_evt.should_commit()) {
    surv_evt.set_statistics(create_g1_evacstats(GCId::current(), summary));
    surv_evt.commit();
  }
}

void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
  EventG1EvacuationOldStatistics old_evt;
  if (old_evt.should_commit()) {
    old_evt.set_statistics(create_g1_evacstats(GCId::current(), summary));
    old_evt.commit();
  }
}
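
// IHOP (Initiating Heap Occupancy Percent) statistics. Incoming durations are
// scaled by MILLIUNITS to the milliseconds the event fields expect, and the
// percentage and rate fields are guarded against division by zero.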
void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
                                             size_t target_occupancy,
                                             size_t current_occupancy,
                                             size_t last_allocation_size,
                                             double last_allocation_duration,
                                             double last_marking_length) {
  EventG1BasicIHOP evt;
  if (evt.should_commit()) {
    evt.set_gcId(GCId::current());
    evt.set_threshold(threshold);
    evt.set_targetOccupancy(target_occupancy);
    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
    evt.set_currentOccupancy(current_occupancy);
    evt.set_recentMutatorAllocationSize(last_allocation_size);
    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
    evt.commit();
  }
}

void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
                                                size_t internal_target_occupancy,
                                                size_t current_occupancy,
                                                size_t additional_buffer_size,
                                                double predicted_allocation_rate,
                                                double predicted_marking_length,
                                                bool prediction_active) {
  EventG1AdaptiveIHOP evt;
  if (evt.should_commit()) {
    evt.set_gcId(GCId::current());
    evt.set_threshold(threshold);
    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
    evt.set_ihopTargetOccupancy(internal_target_occupancy);
    evt.set_currentOccupancy(current_occupancy);
    evt.set_additionalBufferSize(additional_buffer_size);
    evt.set_predictedAllocationRate(predicted_allocation_rate);
    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
    evt.set_predictionActive(prediction_active);
    evt.commit();
  }
}

#endif // INCLUDE_G1GC
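
// Helpers translating the heap summary value types into the JFR struct types
// embedded in the heap summary events below.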
static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
  JfrStructVirtualSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_committedEnd((TraceAddress)summary.committed_end());
  space.set_committedSize(summary.committed_size());
  space.set_reservedEnd((TraceAddress)summary.reserved_end());
  space.set_reservedSize(summary.reserved_size());
  return space;
}

static JfrStructObjectSpace to_struct(const SpaceSummary& summary) {
  JfrStructObjectSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_end((TraceAddress)summary.end());
  space.set_used(summary.used());
  space.set_size(summary.size());
  return space;
}
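
// Visitor that sends the heap summary events. The G1 and ParallelScavenge
// overloads first dispatch to the generic GCHeapSummary event and then send a
// collector-specific summary on top of it.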
class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
  GCWhen::Type _when;
 public:
  GCHeapSummaryEventSender(GCWhen::Type when) : _when(when) {}

  void visit(const GCHeapSummary* heap_summary) const {
    const VirtualSpaceSummary& heap_space = heap_summary->heap();

    EventGCHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(GCId::current());
      e.set_when((u1)_when);
      e.set_heapSpace(to_struct(heap_space));
      e.set_heapUsed(heap_summary->used());
      e.commit();
    }
  }

  void visit(const G1HeapSummary* g1_heap_summary) const {
    visit((GCHeapSummary*)g1_heap_summary);

    EventG1HeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(GCId::current());
      e.set_when((u1)_when);
      e.set_edenUsedSize(g1_heap_summary->edenUsed());
      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
      e.commit();
    }
  }

  void visit(const PSHeapSummary* ps_heap_summary) const {
    visit((GCHeapSummary*)ps_heap_summary);

    const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
    const SpaceSummary& old_space = ps_heap_summary->old_space();
    const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
    const SpaceSummary& eden_space = ps_heap_summary->eden();
    const SpaceSummary& from_space = ps_heap_summary->from();
    const SpaceSummary& to_space = ps_heap_summary->to();

    EventPSHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(GCId::current());
      e.set_when((u1)_when);

      e.set_oldSpace(to_struct(old_summary));
      e.set_oldObjectSpace(to_struct(old_space));
      e.set_youngSpace(to_struct(young_summary));
      e.set_edenSpace(to_struct(eden_space));
      e.set_fromSpace(to_struct(from_space));
      e.set_toSpace(to_struct(to_space));
      e.commit();
    }
  }
};

void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
  GCHeapSummaryEventSender visitor(when);
  heap_summary.accept(&visitor);
}

static JfrStructMetaspaceSizes to_struct(const MetaspaceSizes& sizes) {
  JfrStructMetaspaceSizes meta_sizes;

  meta_sizes.set_committed(sizes.committed());
  meta_sizes.set_used(sizes.used());
  meta_sizes.set_reserved(sizes.reserved());

  return meta_sizes;
}

void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
  EventMetaspaceSummary e;
  if (e.should_commit()) {
    e.set_gcId(GCId::current());
    e.set_when((u1) when);
    e.set_gcThreshold(meta_space_summary.capacity_until_GC());
    e.set_metaspace(to_struct(meta_space_summary.meta_space()));
    e.set_dataSpace(to_struct(meta_space_summary.data_space()));
    e.set_classSpace(to_struct(meta_space_summary.class_space()));
    e.commit();
  }
}
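
// Pause phases are recorded with one event type per nesting level so that
// sub-phases appear under their parent pause in a recording; phases nested
// deeper than the deepest level event type are silently dropped.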
class PhaseSender : public PhaseVisitor {
  void visit_pause(GCPhase* phase) {
    assert(phase->level() < PhasesStack::PHASE_LEVELS, "Need more event types for PausePhase");

    switch (phase->level()) {
      case 0: send_phase<EventGCPhasePause>(phase); break;
      case 1: send_phase<EventGCPhasePauseLevel1>(phase); break;
      case 2: send_phase<EventGCPhasePauseLevel2>(phase); break;
      case 3: send_phase<EventGCPhasePauseLevel3>(phase); break;
      case 4: send_phase<EventGCPhasePauseLevel4>(phase); break;
      default: /* Ignore sending this phase */ break;
    }
  }

  void visit_concurrent(GCPhase* phase) {
    assert(phase->level() < 1, "There is only one level for ConcurrentPhase");

    switch (phase->level()) {
      case 0: send_phase<EventGCPhaseConcurrent>(phase); break;
      default: /* Ignore sending this phase */ break;
    }
  }

 public:
  template<typename T>
  void send_phase(GCPhase* phase) {
    T event(UNTIMED);
    if (event.should_commit()) {
      event.set_gcId(GCId::current());
      event.set_name(phase->name());
      event.set_starttime(phase->start());
      event.set_endtime(phase->end());
      event.commit();
    }
  }

  void visit(GCPhase* phase) {
    if (phase->type() == GCPhase::PausePhaseType) {
      visit_pause(phase);
    } else {
      assert(phase->type() == GCPhase::ConcurrentPhaseType, "Should be ConcurrentPhaseType");
      visit_concurrent(phase);
    }
  }
};

void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
  PhaseSender phase_reporter;

  TimePartitionPhasesIterator iter(time_partitions);
  while (iter.has_next()) {
    GCPhase* phase = iter.next();
    phase->accept(&phase_reporter);
  }
}