/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"

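// BFSClosure performs a breadth-first traversal of the reference graph,
// starting from the root set staged in the EdgeQueue, to locate reference
// chains from GC roots to sampled (leak candidate) objects. Visited objects
// are tracked in the BitSet and discovered chains are recorded in the
// EdgeStore. If the EdgeQueue fills up, the remaining traversal falls back
// to a depth-first search (see DFSClosure).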
BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits) :
  _edge_queue(edge_queue),
  _edge_store(edge_store),
  _mark_bits(mark_bits),
  _current_parent(NULL),
  _current_frontier_level(0),
  _next_frontier_idx(0),
  _prev_frontier_idx(0),
  _dfs_fallback_idx(0),
  _use_dfs(false) {
}

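// Logs a one-line summary for a frontier level: the number of edges in
// [low_idx, high_idx) and their approximate aggregate size in KB.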
static void log_frontier_level_summary(size_t level,
                                       size_t high_idx,
                                       size_t low_idx,
                                       size_t edge_size) {
  const size_t nof_edges_in_frontier = high_idx - low_idx;
  log_trace(jfr, system)(
    "BFS front: " SIZE_FORMAT " edges: " SIZE_FORMAT " size: " SIZE_FORMAT " [KB]",
    level,
    nof_edges_in_frontier,
    (nof_edges_in_frontier * edge_size) / K
  );
}

void BFSClosure::log_completed_frontier() const {
  log_frontier_level_summary(_current_frontier_level,
                             _next_frontier_idx,
                             _prev_frontier_idx,
                             _edge_queue->sizeof_edge());
}

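// Logged once the traversal has fallen back to DFS: first the two frontier
// summaries (the frontier in progress and the last, partially built, frontier),
// then the point at which the edge queue filled up and how many queued edges
// were handed over to the DFS.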
void BFSClosure::log_dfs_fallback() const {
  const size_t edge_size = _edge_queue->sizeof_edge();
  // first, complete the summary for the frontier that was in progress
  log_frontier_level_summary(_current_frontier_level,
                             _next_frontier_idx,
                             _prev_frontier_idx,
                             edge_size);

  // then the summary for the last, partially built, frontier
  log_frontier_level_summary(_current_frontier_level + 1,
                             _edge_queue->bottom(),
                             _next_frontier_idx,
                             edge_size);

  // additional information about the DFS fallback
  log_trace(jfr, system)(
    "BFS front: " SIZE_FORMAT " filled edge queue at edge: " SIZE_FORMAT,
    _current_frontier_level,
    _dfs_fallback_idx
  );

  const size_t nof_dfs_completed_edges = _edge_queue->bottom() - _dfs_fallback_idx;
  log_trace(jfr, system)(
    "DFS to complete " SIZE_FORMAT " edges size: " SIZE_FORMAT " [KB]",
    nof_dfs_completed_edges,
    (nof_dfs_completed_edges * edge_size) / K
  );
}

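// Entry point: process the pre-staged root set, then run the breadth-first
// traversal over the edge queue.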
void BFSClosure::process() {
  process_root_set();
  process_queue();
}

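// The root edges were staged up front (see do_root()); they have no parent,
// and _current_parent is still NULL while they are processed.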
void BFSClosure::process_root_set() {
  for (size_t idx = _edge_queue->bottom(); idx < _edge_queue->top(); ++idx) {
    const Edge* edge = _edge_queue->element_at(idx);
    assert(edge->parent() == NULL, "invariant");
    process(edge->reference(), edge->pointee());
  }
}

void BFSClosure::process(const oop* reference, const oop pointee) {
  closure_impl(reference, pointee);
}
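
// Core visit logic shared by root processing and the do_oop() callbacks:
//  - bail out if the time budget tracked by GranularTimer has expired
//  - if the traversal has already fallen back to DFS, continue depth-first
//    from the current parent instead of enqueueing
//  - otherwise mark the pointee; a NULL mark word identifies a sampled object
//    (the sampler is assumed to have primed the mark words of sample objects
//    before the traversal), in which case the reference chain is recorded
//  - non-root references are queued for the next frontier, and a full queue
//    triggers the DFS fallback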
void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
  assert(reference != NULL, "invariant");
  assert(UnifiedOop::dereference(reference) == pointee, "invariant");

  if (GranularTimer::is_finished()) {
    return;
  }

  if (_use_dfs) {
    assert(_current_parent != NULL, "invariant");
    DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, _current_parent);
    return;
  }

  if (!_mark_bits->is_marked(pointee)) {
    _mark_bits->mark_obj(pointee);
    // is the pointee a sample object?
    if (NULL == pointee->mark()) {
      add_chain(reference, pointee);
    }

    // if we are processing the initial root set, don't add to the queue
    if (_current_parent != NULL) {
      _edge_queue->add(_current_parent, reference);
    }

    if (_edge_queue->is_full()) {
      dfs_fallback();
    }
  }
}

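// Records the reference chain for a sampled object. A root reference forms a
// chain of a single edge; otherwise the length passed to put_chain() is
// _current_frontier_level + 2, i.e. the new leaf edge plus the edges back up
// through the completed frontiers to the root.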
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
  assert(pointee != NULL, "invariant");
  assert(NULL == pointee->mark(), "invariant");
  Edge leak_edge(_current_parent, reference);
  _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
}

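// Invoked when the edge queue is full and no further edges can be added:
// record where the fallback happened, then drain the queued edges and continue
// each of them depth-first. _use_dfs makes subsequent closure_impl() calls
// take the DFS path as well.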
void BFSClosure::dfs_fallback() {
  assert(_edge_queue->is_full(), "invariant");
  _use_dfs = true;
  _dfs_fallback_idx = _edge_queue->bottom();
  while (!_edge_queue->is_empty()) {
    const Edge* edge = _edge_queue->remove();
    if (edge->pointee() != NULL) {
      DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, edge);
    }
  }
}

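// Breadth-first loop: keep removing and iterating edges until is_complete()
// determines that the traversal is done (queue exhausted at a frontier
// boundary, or a DFS fallback finished the remaining work).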
void BFSClosure::process_queue() {
  assert(_current_frontier_level == 0, "invariant");
  assert(_next_frontier_idx == 0, "invariant");
  assert(_prev_frontier_idx == 0, "invariant");

  _next_frontier_idx = _edge_queue->top();
  while (!is_complete()) {
    iterate(_edge_queue->remove()); // _edge_queue->remove() increments bottom
  }
}

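// Advances the frontier bookkeeping once the current frontier is exhausted.
// The method is const, so the frontier fields are presumably declared mutable
// in the class definition.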
void BFSClosure::step_frontier() const {
  log_completed_frontier();
  ++_current_frontier_level;
  _prev_frontier_idx = _next_frontier_idx;
  _next_frontier_idx = _edge_queue->top();
}

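// Frontier bookkeeping, evaluated after each removed edge:
//  - bottom below the next frontier start: current frontier still has edges
//  - bottom beyond the next frontier start: the queue filled and the DFS
//    fallback consumed the rest, so the traversal is done
//  - bottom at the next frontier start: the frontier is exhausted; done if
//    the queue is empty, otherwise step to the next frontier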
bool BFSClosure::is_complete() const {
  if (_edge_queue->bottom() < _next_frontier_idx) {
    return false;
  }
  if (_edge_queue->bottom() > _next_frontier_idx) {
    // fallback onto DFS as part of processing the frontier
    assert(_dfs_fallback_idx >= _prev_frontier_idx, "invariant");
    assert(_dfs_fallback_idx < _next_frontier_idx, "invariant");
    log_dfs_fallback();
    return true;
  }
  assert(_edge_queue->bottom() == _next_frontier_idx, "invariant");
  if (_edge_queue->is_empty()) {
    return true;
  }
  step_frontier();
  return false;
}

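// Visits all references held by the pointee of the given edge; the edge
// becomes _current_parent for any references discovered by the do_oop()
// callbacks.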
void BFSClosure::iterate(const Edge* parent) {
  assert(parent != NULL, "invariant");
  const oop pointee = parent->pointee();
  assert(pointee != NULL, "invariant");
  _current_parent = parent;
  pointee->oop_iterate(this);
}

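// Callback for an uncompressed (heap-word aligned) reference field.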
void BFSClosure::do_oop(oop* ref) {
  assert(ref != NULL, "invariant");
  assert(is_aligned(ref, HeapWordSize), "invariant");
  const oop pointee = *ref;
  if (pointee != NULL) {
    closure_impl(ref, pointee);
  }
}

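// Callback for a compressed reference field. The narrowOop* is encoded via
// UnifiedOop so that it can later be told apart from a regular oop* when
// dereferenced (cf. the UnifiedOop::dereference() assert in closure_impl()).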
void BFSClosure::do_oop(narrowOop* ref) {
  assert(ref != NULL, "invariant");
  assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
  const oop pointee = RawAccess<>::oop_load(ref);
  if (pointee != NULL) {
    closure_impl(UnifiedOop::encode(ref), pointee);
  }
}

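// Root references are staged directly into the edge queue with a NULL parent;
// if the queue is already full they are silently dropped.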
void BFSClosure::do_root(const oop* ref) {
  assert(ref != NULL, "invariant");
  assert(is_aligned(ref, HeapWordSize), "invariant");
  assert(*ref != NULL, "invariant");
  if (!_edge_queue->is_full()) {
    _edge_queue->add(NULL, ref);
  }
}