/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/workgroup.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

#ifdef ASSERT

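// SWEEP(nm) records a snapshot of 'nm' into the debug-only ring buffer below,
// tagged with the source line of the call site. In product builds the macro
// expands to nothing (see the #else branch at the end of this block).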
#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
class SweeperRecord {
 public:
  int traversal;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
    tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                  PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                  traversal,
                  compile_id,
                  kind == NULL ? "" : kind,
                  p2i(uep),
                  p2i(vep),
                  state,
                  traversal_mark,
                  line);
  }
};

static int _sweep_index = 0;
static SweeperRecord* _records = NULL;

void NMethodSweeper::report_events(int id, address entry) {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
    for (int i = 0; i < _sweep_index; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
  }
}

void NMethodSweeper::report_events() {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
    for (int i = 0; i < _sweep_index; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
  }
}

void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->get_state();
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;
    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}

void NMethodSweeper::init_sweeper_log() {
  if (LogSweeper && _records == NULL) {
    // Create the ring buffer for the logging code
    _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
    memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
  }
}
#else
#define SWEEP(nm)
#endif

CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method
long NMethodSweeper::_traversals = 0;                  // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0;                // Virtual time used to periodically invoke sweeper
long NMethodSweeper::_last_sweep = 0;                  // Value of _time_counter when the last sweep happened
int  NMethodSweeper::_seen = 0;                        // Number of nmethods we have processed so far in the current pass of the code cache

volatile bool NMethodSweeper::_should_sweep = false;   // Indicates if we should invoke the sweeper
volatile bool NMethodSweeper::_force_sweep = false;    // Indicates if we should force a sweep
volatile int  NMethodSweeper::_bytes_changed = 0;      // Counts the total nmethod size if the nmethod changed from:
                                                       //   1) alive       -> not_entrant
                                                       //   2) not_entrant -> zombie
int NMethodSweeper::_hotness_counter_reset_val = 0;

long   NMethodSweeper::_total_nof_methods_reclaimed = 0;    // Accumulated number of methods flushed
long   NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated number of C2 methods flushed
size_t NMethodSweeper::_total_flushed_size = 0;             // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping;              // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep;            // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time;                  // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time;         // Peak time sweeping one fraction

class MarkActivationClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (nm->is_not_entrant()) {
      nm->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

class SetHotnessClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
  }
};
static SetHotnessClosure set_hotness_closure;

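// Returns the initial value of the per-nmethod hotness counter. The value is
// derived from ReservedCodeCacheSize, so larger code caches give methods more
// time before they become flushing candidates. A worked example, assuming the
// common product default of ReservedCodeCacheSize = 240M (your build may
// differ): reset_val = (240M / M) * 2 = 480. Caches smaller than 1M get the
// minimum value of 1.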
int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}

bool NMethodSweeper::wait_for_stack_scanning() {
  return _current.end();
}

class NMethodMarkingThreadClosure : public ThreadClosure {
 private:
  CodeBlobClosure* _cl;
 public:
  NMethodMarkingThreadClosure(CodeBlobClosure* cl) : _cl(cl) {}
  void do_thread(Thread* thread) {
    if (thread->is_Java_thread() && !thread->is_Code_cache_sweeper_thread()) {
      JavaThread* jt = (JavaThread*) thread;
      jt->nmethods_do(_cl);
    }
  }
};

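// Distributes the per-thread nmethod walk over a gang of workers. Each worker
// calls possibly_parallel_threads_do(), and the claim token rotated in the
// constructor ensures every Java thread is claimed by exactly one worker, so
// no thread's stack is scanned twice.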
class NMethodMarkingTask : public AbstractGangTask {
 private:
  NMethodMarkingThreadClosure* _cl;
 public:
  NMethodMarkingTask(NMethodMarkingThreadClosure* cl) :
    AbstractGangTask("Parallel NMethod Marking"),
    _cl(cl) {
    Threads::change_thread_claim_token();
  }

  ~NMethodMarkingTask() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) {
    Threads::possibly_parallel_threads_do(true, _cl);
  }
};

/**
 * Scans the stacks of all Java threads and marks activations of not-entrant methods.
 * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
 * safepoint.
 */
void NMethodSweeper::mark_active_nmethods() {
  CodeBlobClosure* cl = prepare_mark_active_nmethods();
  if (cl != NULL) {
    WorkGang* workers = Universe::heap()->get_safepoint_workers();
    if (workers != NULL) {
      NMethodMarkingThreadClosure tcl(cl);
      NMethodMarkingTask task(&tcl);
      workers->run_task(&task);
    } else {
      Threads::nmethods_do(cl);
    }
  }
}

CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
#ifdef ASSERT
  if (ThreadLocalHandshakes) {
    assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
    assert_lock_strong(CodeCache_lock);
  } else {
    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  }
#endif

  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks.
  if (!MethodFlushing) {
    return NULL;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  assert(_current.method() == NULL, "should only happen between sweeper cycles");
  assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");

  _seen = 0;
  _current = CompiledMethodIterator(CompiledMethodIterator::all_blobs);
  // Initialize to first nmethod
  _current.next();
  _traversals += 1;
  _total_time_this_sweep = Tickspan();

  if (PrintMethodFlushing) {
    tty->print_cr("### Sweep: stack traversal %ld", _traversals);
  }
  return &mark_activation_closure;
}

CodeBlobClosure* NMethodSweeper::prepare_reset_hotness_counters() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");

  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks.
  if (!MethodFlushing) {
    return NULL;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  if (_current.method() != NULL) {
    if (_current.method()->is_nmethod()) {
      assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
    } else if (_current.method()->is_aot()) {
      assert(CodeCache::find_blob_unsafe(_current.method()->code_begin()) == _current.method(), "Sweeper AOT method cached state invalid");
    } else {
      ShouldNotReachHere();
    }
  }

  return &set_hotness_closure;
}

/**
 * Triggers stack scanning of active methods, either via thread-local handshakes
 * (if ThreadLocalHandshakes is enabled) or via a VM operation. Stack scanning
 * is mandatory for the sweeper to make progress.
 */
void NMethodSweeper::do_stack_scanning() {
  assert(!CodeCache_lock->owned_by_self(), "just checking");
  if (wait_for_stack_scanning()) {
    if (ThreadLocalHandshakes) {
      CodeBlobClosure* code_cl;
      {
        MutexLocker ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        code_cl = prepare_mark_active_nmethods();
      }
      if (code_cl != NULL) {
        NMethodMarkingThreadClosure tcl(code_cl);
        Handshake::execute(&tcl);
      }
    } else {
      VM_MarkActiveNMethods op;
      VMThread::execute(&op);
    }
  }
}

void NMethodSweeper::sweeper_loop() {
  bool timeout;
  while (true) {
    {
      ThreadBlockInVM tbivm(JavaThread::current());
      MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
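      // Sleep until notified that the code cache needs sweeping, or for at
      // most 24 hours (the constant below is in milliseconds).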
      const long wait_time = 60 * 60 * 24 * 1000;
      timeout = waiter.wait(wait_time);
    }
    if (!timeout) {
      // We were notified rather than timed out, so there is sweeping to do.
      possibly_sweep();
    }
  }
}

/**
 * Wakes up the sweeper thread to possibly sweep.
 */
void NMethodSweeper::notify(int code_blob_type) {
  // Makes sure that we do not invoke the sweeper too often during startup.
  double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
  double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
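  // A worked example, assuming the product default StartAggressiveSweepingAt = 10
  // and reading reverse_free_ratio() as max_capacity / unallocated_capacity:
  //   start_threshold            = 100.0 / 10 = 10.0
  //   aggressive_sweep_threshold = MIN2(10.0, 1.1) = 1.1
  // so the sweeper is only notified once less than ~91% of the code heap is
  // unallocated. During startup the heap is nearly empty, the ratio stays close
  // to 1.0, and the sweeper is left alone.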
  if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
    assert_locked_or_safepoint(CodeCache_lock);
    CodeCache_lock->notify();
  }
}

/**
 * Wakes up the sweeper thread and forces a sweep. Blocks until it has finished.
 */
void NMethodSweeper::force_sweep() {
  ThreadBlockInVM tbivm(JavaThread::current());
  MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  // Request forced sweep
  _force_sweep = true;
  while (_force_sweep) {
    // Notify sweeper that we want to force a sweep and wait for completion.
    // In case a sweep currently takes place we timeout and try again because
    // we want to enforce a full sweep.
    CodeCache_lock->notify();
    waiter.wait(1000);
  }
}

/**
 * Handle a safepoint request
 */
void NMethodSweeper::handle_safepoint_request() {
  JavaThread* thread = JavaThread::current();
  if (SafepointMechanism::should_block(thread)) {
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count());
    }
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    ThreadBlockInVM tbivm(thread);
    thread->java_suspend_self();
  }
}

/**
 * This function invokes the sweeper if at least one of the three conditions is met:
 *    (1) The code cache is getting full.
 *    (2) There have been sufficient state changes since the last sweep.
 *    (3) We have not been sweeping for 'some time'.
 */
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
  // This is one of the two places where should_sweep can be set to true. The general
  // idea is as follows: If there is enough free space in the code cache, there is no
  // need to invoke the sweeper. The following formula (which determines whether to invoke
  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
  // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
  // the formula considers how much space in the code cache is currently used. Here are
  // some examples that will (hopefully) help in understanding.
  //
  // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
  //                               the result of the division is 0. This keeps the used
  //                               code cache size small (important for embedded Java).
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 10% full). The formula
  //                               computes: (256 / 16) - 1 = 15. As a result, we invoke
  //                               the sweeper after 15 invocations of
  //                               'mark_active_nmethods'.
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 90% full). The formula
  //                               computes: (256 / 16) - 10 = 6.
  if (!_should_sweep) {
    const int time_since_last_sweep = _time_counter - _last_sweep;
    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
    // value) that disables the intended periodic sweeps.
    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
    double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
        MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
             CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");

    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
      _should_sweep = true;
    }
  }

  // Remember if this was a forced sweep
  bool forced = _force_sweep;

  // Force stack scanning if there is only 10% free space in the code cache.
  // We force stack scanning only if the non-profiled code heap gets full, since critical
  // allocations go to the non-profiled heap and we must make sure that there is
  // enough space.
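  // reverse_free_ratio() is (roughly) max_capacity / unallocated_capacity of the
  // code heap, so its reciprocal scaled by 100 yields the percentage of free
  // space (e.g., a ratio of 10 means 10% free). This reading of the helper is
  // inferred from its use here and in possibly_flush() below.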
  double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
  if (free_percent <= StartAggressiveSweepingAt || forced || _should_sweep) {
    do_stack_scanning();
  }

  if (_should_sweep || forced) {
    init_sweeper_log();
    sweep_code_cache();
  }

  // We are done with sweeping the code cache once.
  _total_nof_code_cache_sweeps++;
  _last_sweep = _time_counter;
  // Reset flag; temporarily disables sweeper
  _should_sweep = false;
  // If there was enough state change, 'possibly_enable_sweeper()'
  // sets '_should_sweep' to true
  possibly_enable_sweeper();
  // Reset _bytes_changed only if there was enough state change. _bytes_changed
  // can further increase by calls to 'report_state_change'.
  if (_should_sweep) {
    _bytes_changed = 0;
  }

  if (forced) {
    // Notify requester that forced sweep finished
    assert(_force_sweep, "Should be a forced sweep");
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _force_sweep = false;
    CodeCache_lock->notify();
  }
}

static void post_sweep_event(EventSweepCodeCache* event,
                             const Ticks& start,
                             const Ticks& end,
                             s4 traversals,
                             int swept,
                             int flushed,
                             int zombified) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_starttime(start);
  event->set_endtime(end);
  event->set_sweepId(traversals);
  event->set_sweptCount(swept);
  event->set_flushedCount(flushed);
  event->set_zombifiedCount(zombified);
  event->commit();
}

void NMethodSweeper::sweep_code_cache() {
  ResourceMark rm;
  Ticks sweep_start_counter = Ticks::now();

  log_debug(codecache, sweep, start)("CodeCache flushing");

  int flushed_count = 0;
  int zombified_count = 0;
  int flushed_c2_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count());
  }

  int swept_count = 0;
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    while (!_current.end()) {
      swept_count++;
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      CompiledMethod* nm = _current.method();
      _current.next();

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // Save information before potentially flushing the nmethod.
        // Only nmethods are flushed, so size only matters for them.
        int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0;
        bool is_c2_method = nm->is_compiled_by_c2();
        bool is_osr = nm->is_osr_method();
        int compile_id = nm->compile_id();
        intptr_t address = p2i(nm);
        const char* state_before = nm->state();
        const char* state_after = "";

        MethodStateChange type = process_compiled_method(nm);
        switch (type) {
          case Flushed:
            state_after = "flushed";
            freed_memory += size;
            ++flushed_count;
            if (is_c2_method) {
              ++flushed_c2_count;
            }
            break;
          case MadeZombie:
            state_after = "made zombie";
            ++zombified_count;
            break;
          case None:
            break;
          default:
            ShouldNotReachHere();
        }
        if (PrintMethodFlushing && Verbose && type != None) {
          tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after);
        }
      }

      _seen++;
      handle_safepoint_request();
    }
  }

  assert(_current.end(), "must have scanned the whole cache");

  const Ticks sweep_end_counter = Ticks::now();
  const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
  {
    MutexLocker mu(NMethodSweeperStats_lock, Mutex::_no_safepoint_check_flag);
    _total_time_sweeping += sweep_time;
    _total_time_this_sweep += sweep_time;
    _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
    _total_flushed_size += freed_memory;
    _total_nof_methods_reclaimed += flushed_count;
    _total_nof_c2_methods_reclaimed += flushed_c2_count;
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
  }

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, flushed_count, zombified_count);
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper: sweep time(" JLONG_FORMAT "): ", sweep_time.value());
  }
#endif

  Log(codecache, sweep) log;
  if (log.is_debug()) {
    LogStream ls(log.debug());
    CodeCache::print_summary(&ls, false);
  }
  log_sweep("finished");

  // The sweeper is the only place where memory is released, so check here if it
  // is time to restart the compiler. Only checking whether there is a certain
  // amount of free memory in the code cache might lead to re-enabling
  // compilation although no memory has been released. For example, there are
  // cases when compilation was disabled although there is 4MB (or more) free
  // memory in the code cache. The reason is code cache fragmentation. Therefore,
  // it only makes sense to re-enable compilation if we have actually freed memory.
  // Note that typically several kB are released by sweeping 16MB of the code
  // cache, so 'freed_memory' > 0 is the right condition for restarting the compiler.
  if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log.debug("restart compiler");
    log_sweep("restart_compiler");
  }
}

/**
 * This function updates the sweeper statistics that keep track of nmethod
 * state changes. If there is 'enough' state change, the sweeper is invoked
 * as soon as possible. There can be data races on _bytes_changed. The data
 * races are benign, since it does not matter if we lose a couple of bytes.
 * In the worst case we call the sweeper a little later. Also, we are guaranteed
 * to invoke the sweeper if the code cache gets full.
 */
void NMethodSweeper::report_state_change(nmethod* nm) {
  _bytes_changed += nm->total_size();
  possibly_enable_sweeper();
}

/**
 * Function determines if there was 'enough' state change in the code cache to invoke
 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
 * the code cache since the last sweep.
 */
void NMethodSweeper::possibly_enable_sweeper() {
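  // A worked example, assuming the common product default of
  // ReservedCodeCacheSize = 240M: the sweeper is re-enabled once more than
  // 240M / 100 = 2.4MB worth of nmethods have changed state (alive ->
  // not_entrant, not_entrant -> zombie) since the last sweep.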
  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
  if (percent_changed > 1.0) {
    _should_sweep = true;
  }
}

class CompiledMethodMarker: public StackObj {
 private:
  CodeCacheSweeperThread* _thread;
 public:
  CompiledMethodMarker(CompiledMethod* cm) {
    JavaThread* current = JavaThread::current();
    assert(current->is_Code_cache_sweeper_thread(), "Must be");
    _thread = (CodeCacheSweeperThread*)current;
    if (!cm->is_zombie() && !cm->is_unloading()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_compiled_method(cm);
    }
  }
  ~CompiledMethodMarker() {
    _thread->set_scanned_compiled_method(NULL);
  }
};

NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) {
  assert(cm != NULL, "sanity");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  MethodStateChange result = None;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while the locks below are acquired.
  CompiledMethodMarker nmm(cm);
  SWEEP(cm);

  // Skip methods that are currently referenced by the VM
  if (cm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (cm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
      cm->cleanup_inline_caches(false);
      SWEEP(cm);
    }
    return result;
  }

  if (cm->is_zombie()) {
    // All inline caches that referred to this nmethod were cleaned in the
    // previous sweeper cycle. Now flush the nmethod from the code cache.
    assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods");
    cm->flush();
    assert(result == None, "sanity");
    result = Flushed;
  } else if (cm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    OrderAccess::loadload(); // _stack_traversal_mark and _state
    if (cm->can_convert_to_zombie()) {
      // Code cache state change is tracked in make_zombie()
      cm->make_zombie();
      SWEEP(cm);
      // The nmethod may have been locked by JVMTI after being made zombie (see
      // JvmtiDeferredEvent::compiled_method_unload_event()). If so, we cannot
      // flush the osr nmethod directly but have to wait for a later sweeper cycle.
      if (cm->is_osr_method() && !cm->is_locked_by_vm()) {
        // No inline caches will ever point to osr methods, so we can just remove it.
        // Make sure that we unregistered the nmethod with the heap and flushed all
        // dependencies before removing the nmethod (done in make_zombie()).
        assert(cm->is_zombie(), "nmethod must be unregistered");
        cm->flush();
        assert(result == None, "sanity");
        result = Flushed;
      } else {
        assert(result == None, "sanity");
        result = MadeZombie;
        assert(cm->is_zombie(), "nmethod must be zombie");
      }
    } else {
      // Still alive, clean up its inline caches
      cm->cleanup_inline_caches(false);
      SWEEP(cm);
    }
  } else if (cm->is_unloaded()) {
    // Code is unloaded, so there are no activations on the stack.
    // Convert the nmethod to zombie, or flush it directly in the OSR case.
    if (cm->is_osr_method()) {
      SWEEP(cm);
      // No inline caches will ever point to osr methods, so we can just remove it
      cm->flush();
      assert(result == None, "sanity");
      result = Flushed;
    } else {
      // Code cache state change is tracked in make_zombie()
      cm->make_zombie();
      SWEEP(cm);
      assert(result == None, "sanity");
      result = MadeZombie;
    }
  } else {
    if (cm->is_nmethod()) {
      possibly_flush((nmethod*)cm);
    }
    // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
    cm->cleanup_inline_caches(false);
    SWEEP(cm);
  }
  return result;
}


void NMethodSweeper::possibly_flush(nmethod* nm) {
  if (UseCodeCacheFlushing) {
    if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed() && !nm->is_unloading()) {
      bool make_not_entrant = false;

      // Do not make native methods not-entrant
      nm->dec_hotness_counter();
      // Get the initial value of the hotness counter. This value depends on the
      // ReservedCodeCacheSize.
      int reset_val = hotness_counter_reset_val();
      int time_since_reset = reset_val - nm->hotness_counter();
      int code_blob_type = CodeCache::get_code_blob_type(nm);
      double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
      // The less free space in the code cache, the bigger reverse_free_ratio() is.
      // I.e., 'threshold' increases with lower available space in the code cache and a higher
      // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
      // value until it is reset by stack walking - is smaller than the computed threshold, the
      // corresponding nmethod is considered for removal.
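      // A worked example, assuming the product default NmethodSweepActivity = 10
      // and a 240M ReservedCodeCacheSize (so reset_val = 480): with the code heap
      // half full, reverse_free_ratio() = 2 and threshold = -480 + 2 * 10 = -460,
      // so an nmethod is only considered after its hotness counter has decayed
      // from 480 to below -460, i.e. after ~940 sweeps without appearing on any
      // stack. With the heap 99% full, reverse_free_ratio() = 100 and
      // threshold = -480 + 1000 = 520 > 480, so even freshly reset methods become
      // candidates (subject to the MinPassesBeforeFlush check below).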
      if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
        // A method is marked as not-entrant if the method is
        // 1) 'old enough': nm->hotness_counter() < threshold
        // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
        //    The second condition is necessary if we are dealing with very small code cache
        //    sizes (e.g., <10M) and the code cache size is too small to hold all hot methods.
        //    It ensures that methods are not immediately made not-entrant after compilation.
        make_not_entrant = true;
      }

      // The stack-scanning low-cost detection may not see that the method was used
      // (which can happen for flat profiles). Check the age counter for possible data.
      if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
        MethodCounters* mc = nm->method()->get_method_counters(Thread::current());
        if (mc != NULL) {
          // Snapshot the value as it's changed concurrently
          int age = mc->nmethod_age();
          if (MethodCounters::is_nmethod_hot(age)) {
            // The method has gone through flushing, and it became hot enough that it
            // deopted before we could take a look at it. Give it more time to appear
            // in the stack traces, proportional to the number of deopts.
            MethodData* md = nm->method()->method_data();
            if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) {
              // It's been long enough, we still haven't seen it on stack.
              // Try to flush it, but enable counters the next time.
              mc->reset_nmethod_age();
            } else {
              make_not_entrant = false;
            }
          } else if (MethodCounters::is_nmethod_warm(age)) {
            // Method has counters enabled, and the method was used within
            // previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing
            // compiled state.
            mc->reset_nmethod_age();
            // delay the next check
            nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
            make_not_entrant = false;
          } else if (MethodCounters::is_nmethod_age_unset(age)) {
            // No counters were used before. Set the counters to the detection
            // limit value. If the method is going to be used again it will be compiled
            // with counters that we're going to use for analysis next time.
            mc->reset_nmethod_age();
          } else {
            // Method was totally idle for 10 sweeps.
            // The counter already has the initial value; flush it and maybe
            // recompile later with counters.
          }
        }
      }

      if (make_not_entrant) {
        nm->make_not_entrant();

        // Code cache state change is tracked in make_not_entrant()
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
                        nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold);
        }
      }
    }
  }
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}

void NMethodSweeper::print(outputStream* out) {
  ttyLocker ttyl;
  out = (out == NULL) ? tty : out;
  out->print_cr("Code cache sweeper statistics:");
  out->print_cr("  Total sweep time:                %1.0lf ms", (double)_total_time_sweeping.value()/1000000);
  out->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
  out->print_cr("  Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed,
                                                    _total_nof_c2_methods_reclaimed);
  out->print_cr("  Total size of flushed methods:   " SIZE_FORMAT " kB", _total_flushed_size/K);
}