/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"

PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
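// Buckets unlinked while a GC is cleaning dependency contexts are parked on
// this list and deleted later, when it is safe to do so (see
// purge_dependency_contexts() and ClassLoaderDataGraph::purge()).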
nmethodBucket* volatile DependencyContext::_purge_list = NULL;
volatile uint64_t DependencyContext::_cleaning_epoch = 0;
uint64_t DependencyContext::_cleaning_epoch_monotonic = 0;

void dependencyContext_init() {
  DependencyContext::init();
}

void DependencyContext::init() {
  if (UsePerfData) {
    EXCEPTION_MARK;
    _perf_total_buckets_allocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_deallocated_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
    _perf_total_buckets_stale_acc_count =
      PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
  }
}

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    // Since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}

//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
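  // No existing bucket for this nmethod: allocate one and push it onto the
  // head of the list with a CAS loop, since concurrent cleanup may unlink
  // is_unloading entries (and hence change the head) at the same time.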
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}

void DependencyContext::release(nmethodBucket* b) {
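  // A zero _cleaning_epoch means no GC is currently cleaning dependency
  // contexts, so the bucket can be deleted immediately under the
  // CodeCache_lock; otherwise deletion is deferred to the purge list.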
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now.
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Remove an nmethod dependency from the context.
// Decrement the count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must find
// a corresponding bucket, otherwise there's a bug in the recording of dependencies.
// Can be called concurrently by parallel GC threads.
//
void DependencyContext::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* first = dependencies_not_unloading();
  nmethodBucket* last = NULL;
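  // last trails b by one entry so that a bucket whose count drops to zero
  // can be unlinked from its predecessor below.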
  for (nmethodBucket* b = first; b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      int val = b->decrement();
      guarantee(val >= 0, "Underflow: %d", val);
      if (val == 0) {
        if (last == NULL) {
          // If there was not a head that was not unloading, we can set a new
          // head without a CAS, because we know there is no contending cleanup.
          set_dependencies(b->next_not_unloading());
        } else {
          // Only supports a single inserting thread (protected by CodeCache_lock)
          // for now. Therefore, the next pointer only competes with another cleanup
          // operation. That interaction does not need a CAS.
          last->set_next(b->next_not_unloading());
        }
        release(b);
      }
      return;
    }
    last = b;
  }
}

//
// Reclaim all unused buckets.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != NULL;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = NULL;
}

//
// Clean up a dependency context by unlinking all dependents corresponding
// to is_unloading nmethods and placing them on a purge list, to be deleted
// later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries onto the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge().
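  // next_not_unloading() unlinks any is_unloading entries it walks past and
  // hands them to release() as a side effect, so simply traversing the list
  // is what performs the cleanup.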
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

//
// Invalidate all dependencies in the context
int DependencyContext::remove_all_dependents() {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  int removed = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    nmethodBucket* next = b->next_not_unloading();
    removed++;
    release(b);
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  return marked;
}

#ifndef PRODUCT
void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
  }
}

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
#ifdef ASSERT
      int count = b->count();
      assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
      return true;
    }
  }
  return false;
}

#endif //PRODUCT

int nmethodBucket::decrement() {
  return Atomic::sub(1, &_count);
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
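  // The context has already been claimed during this epoch if _last_cleanup
  // has caught up with the global cleaning epoch; otherwise try to claim it
  // by advancing _last_cleanup with a CAS, so only one thread cleans it.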
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
}

// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(b, _dependency_context_addr);
}

nmethodBucket* DependencyContext::dependencies() {
  return Atomic::load(_dependency_context_addr);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(epoch, &_cleaning_epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay the call to cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(epoch, &_cleaning_epoch);
}

// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(next_next, &_next, next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}

// Relaxed accessors
nmethodBucket* nmethodBucket::next() {
  return Atomic::load(&_next);
}

void nmethodBucket::set_next(nmethodBucket* b) {
  Atomic::store(b, &_next);
}

nmethodBucket* nmethodBucket::purge_list_next() {
  return Atomic::load(&_purge_list_next);
}

void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
  Atomic::store(b, &_purge_list_next);
}
