1/*
2 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/shared/copyFailedInfo.hpp"
27#include "gc/shared/gcHeapSummary.hpp"
28#include "gc/shared/gcId.hpp"
29#include "gc/shared/gcTimer.hpp"
30#include "gc/shared/gcTrace.hpp"
31#include "gc/shared/objectCountEventSender.hpp"
32#include "gc/shared/referenceProcessorStats.hpp"
33#include "memory/heapInspection.hpp"
34#include "memory/resourceArea.hpp"
35#include "runtime/os.hpp"
36#include "utilities/globalDefinitions.hpp"
37#include "utilities/macros.hpp"
38#include "utilities/ticks.hpp"
39#if INCLUDE_G1GC
40#include "gc/g1/g1EvacuationInfo.hpp"
41#endif
42
43void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
44 _shared_gc_info.set_cause(cause);
45 _shared_gc_info.set_start_timestamp(timestamp);
46}
47
// Public entry point: records the cause and start timestamp of a collection.
void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
  report_gc_start_impl(cause, timestamp);
}
51
52void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
53 _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
54 _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
55 _shared_gc_info.set_end_timestamp(timestamp);
56
57 send_phase_events(time_partitions);
58 send_garbage_collection_event();
59}
60
// Public entry point: records end timestamp and pause times, then emits events.
void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
  report_gc_end_impl(timestamp, time_partitions);
}
64
// Emits one reference-statistics event per non-strong reference kind
// (soft, weak, final, phantom) processed during this collection.
void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
  send_reference_stats_event(REF_SOFT, rps.soft_count());
  send_reference_stats_event(REF_WEAK, rps.weak_count());
  send_reference_stats_event(REF_FINAL, rps.final_count());
  send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
}
71
72#if INCLUDE_SERVICES
73class ObjectCountEventSenderClosure : public KlassInfoClosure {
74 const double _size_threshold_percentage;
75 const size_t _total_size_in_words;
76 const Ticks _timestamp;
77
78 public:
79 ObjectCountEventSenderClosure(size_t total_size_in_words, const Ticks& timestamp) :
80 _size_threshold_percentage(ObjectCountCutOffPercent / 100),
81 _total_size_in_words(total_size_in_words),
82 _timestamp(timestamp)
83 {}
84
85 virtual void do_cinfo(KlassInfoEntry* entry) {
86 if (should_send_event(entry)) {
87 ObjectCountEventSender::send(entry, _timestamp);
88 }
89 }
90
91 private:
92 bool should_send_event(const KlassInfoEntry* entry) const {
93 double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
94 return percentage_of_heap >= _size_threshold_percentage;
95 }
96};
97
98void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
99 assert(is_alive_cl != NULL, "Must supply function to check liveness");
100
101 if (ObjectCountEventSender::should_send_event()) {
102 ResourceMark rm;
103
104 KlassInfoTable cit(false);
105 if (!cit.allocation_failed()) {
106 HeapInspection hi(false, false, false, NULL);
107 hi.populate_table(&cit, is_alive_cl);
108 ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now());
109 cit.iterate(&event_sender);
110 }
111 }
112}
113#endif // INCLUDE_SERVICES
114
// Emits a heap summary event for the given point in the GC (before/after).
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
  send_gc_heap_summary_event(when, heap_summary);
}
118
// Emits metaspace summary and chunk-free-list events for the given GC point.
void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
  send_meta_space_summary_event(when, summary);

  send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
  // The class-space chunk free list only exists when compressed class pointers are in use.
  if (UseCompressedClassPointers) {
    send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary());
  }
}
127
void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  // Every young GC must have called report_tenuring_threshold() before it ends.
  assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");

  GCTracer::report_gc_end_impl(timestamp, time_partitions);
  send_young_gc_event();

  // Reset so the next cycle is forced to report a fresh threshold.
  _tenuring_threshold = UNSET_TENURING_THRESHOLD;
}
136
// Emits a promotion-failed event with the collected failure statistics.
void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
  send_promotion_failed_event(pf_info);
}
140
// Records the tenuring threshold used by this cycle; consumed (and reset)
// by report_gc_end_impl().
void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
  _tenuring_threshold = tenuring_threshold;
}
144
145bool YoungGCTracer::should_report_promotion_events() const {
146 return should_report_promotion_in_new_plab_event() ||
147 should_report_promotion_outside_plab_event();
148}
149
// True if the "promotion in new PLAB" event is currently enabled.
bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
  return should_send_promotion_in_new_plab_event();
}
153
// True if the "promotion outside PLAB" event is currently enabled.
bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
  return should_send_promotion_outside_plab_event();
}
157
// Emits an event for an object promoted into a freshly allocated PLAB.
void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                       uint age, bool tenured,
                                                       size_t plab_size) const {
  send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
}
163
// Emits an event for an object promoted directly, outside any PLAB.
void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                        uint age, bool tenured) const {
  send_promotion_outside_plab_event(klass, obj_size, age, tenured);
}
168
void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  GCTracer::report_gc_end_impl(timestamp, time_partitions);
  // Emit the old-generation collection event after the shared end handling.
  send_old_gc_event();
}
173
void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
  // Emit the Parallel Old-specific event (e.g. dense prefix) on top of the old GC event.
  send_parallel_old_event();
}
178
// Records the dense prefix address for the Parallel Old event sent at GC end.
void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
  _parallel_old_gc_info.report_dense_prefix(dense_prefix);
}
182
// Emits an event signaling that a concurrent collection could not keep up
// and the collector fell back to a different mode.
void OldGCTracer::report_concurrent_mode_failure() {
  send_concurrent_mode_failure_event();
}
186
187#if INCLUDE_G1GC
188void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
189 send_g1_mmu_event(time_slice_sec * MILLIUNITS,
190 gc_time_sec * MILLIUNITS,
191 max_time_sec * MILLIUNITS);
192}
193
// Records the young collection type (e.g. normal vs. mixed) for the
// G1 young GC event sent at cycle end.
void G1NewTracer::report_yc_type(G1YCType type) {
  _g1_young_gc_info.set_type(type);
}
197
void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
  // Emit the G1-specific young GC event on top of the generic young GC handling.
  send_g1_young_gc_event();
}
202
// Emits an event describing this cycle's evacuation (collection set, copied bytes, ...).
void G1NewTracer::report_evacuation_info(G1EvacuationInfo* info) {
  send_evacuation_info_event(info);
}
206
void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
  send_evacuation_failed_event(ef_info);
  // Clear the accumulated failure info so it can be reused by the next failure.
  ef_info.reset();
}
211
// Emits evacuation allocation statistics, one event each for the young and
// old generation summaries.
void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const {
  send_young_evacuation_statistics(young_summary);
  send_old_evacuation_statistics(old_summary);
}
216
217void G1NewTracer::report_basic_ihop_statistics(size_t threshold,
218 size_t target_ccupancy,
219 size_t current_occupancy,
220 size_t last_allocation_size,
221 double last_allocation_duration,
222 double last_marking_length) {
223 send_basic_ihop_statistics(threshold,
224 target_ccupancy,
225 current_occupancy,
226 last_allocation_size,
227 last_allocation_duration,
228 last_marking_length);
229}
230
// Forwards adaptive IHOP prediction statistics to the event sender.
// NOTE(review): the call below passes additional_buffer_size BEFORE
// current_occupancy — the reverse of this function's own parameter order.
// That is only correct if send_adaptive_ihop_statistics() declares its
// parameters in that order; verify against its declaration in gcTrace.hpp.
void G1NewTracer::report_adaptive_ihop_statistics(size_t threshold,
                                                  size_t internal_target_occupancy,
                                                  size_t current_occupancy,
                                                  size_t additional_buffer_size,
                                                  double predicted_allocation_rate,
                                                  double predicted_marking_length,
                                                  bool prediction_active) {
  send_adaptive_ihop_statistics(threshold,
                                internal_target_occupancy,
                                additional_buffer_size,
                                current_occupancy,
                                predicted_allocation_rate,
                                predicted_marking_length,
                                prediction_active);
}
246
void G1OldTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
  // Only the start timestamp is recorded here; 'cause' is intentionally
  // unused because G1 records it separately via set_gc_cause().
  _shared_gc_info.set_start_timestamp(timestamp);
}
250
// Records the GC cause independently of report_gc_start_impl(), which
// deliberately ignores its cause argument for G1 old collections.
void G1OldTracer::set_gc_cause(GCCause::Cause cause) {
  _shared_gc_info.set_cause(cause);
}
254
255#endif // INCLUDE_G1GC
256