1 | /* |
2 | * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "aot/aotLoader.hpp" |
27 | #include "classfile/classLoaderDataGraph.hpp" |
28 | #include "classfile/symbolTable.hpp" |
29 | #include "classfile/stringTable.hpp" |
30 | #include "classfile/systemDictionary.hpp" |
31 | #include "classfile/vmSymbols.hpp" |
32 | #include "code/codeCache.hpp" |
33 | #include "code/icBuffer.hpp" |
34 | #include "gc/serial/defNewGeneration.hpp" |
35 | #include "gc/shared/adaptiveSizePolicy.hpp" |
36 | #include "gc/shared/cardTableBarrierSet.hpp" |
37 | #include "gc/shared/cardTableRS.hpp" |
38 | #include "gc/shared/collectedHeap.inline.hpp" |
39 | #include "gc/shared/collectorCounters.hpp" |
40 | #include "gc/shared/gcId.hpp" |
41 | #include "gc/shared/gcLocker.hpp" |
42 | #include "gc/shared/gcPolicyCounters.hpp" |
43 | #include "gc/shared/gcTrace.hpp" |
44 | #include "gc/shared/gcTraceTime.inline.hpp" |
45 | #include "gc/shared/genArguments.hpp" |
46 | #include "gc/shared/gcVMOperations.hpp" |
47 | #include "gc/shared/genCollectedHeap.hpp" |
48 | #include "gc/shared/genOopClosures.inline.hpp" |
49 | #include "gc/shared/generationSpec.hpp" |
50 | #include "gc/shared/oopStorageParState.inline.hpp" |
51 | #include "gc/shared/scavengableNMethods.hpp" |
52 | #include "gc/shared/space.hpp" |
53 | #include "gc/shared/strongRootsScope.hpp" |
54 | #include "gc/shared/weakProcessor.hpp" |
55 | #include "gc/shared/workgroup.hpp" |
56 | #include "memory/filemap.hpp" |
57 | #include "memory/metaspaceCounters.hpp" |
58 | #include "memory/resourceArea.hpp" |
59 | #include "memory/universe.hpp" |
60 | #include "oops/oop.inline.hpp" |
61 | #include "runtime/biasedLocking.hpp" |
62 | #include "runtime/flags/flagSetting.hpp" |
63 | #include "runtime/handles.hpp" |
64 | #include "runtime/handles.inline.hpp" |
65 | #include "runtime/java.hpp" |
66 | #include "runtime/vmThread.hpp" |
67 | #include "services/management.hpp" |
68 | #include "services/memoryService.hpp" |
69 | #include "utilities/debug.hpp" |
70 | #include "utilities/formatBuffer.hpp" |
71 | #include "utilities/macros.hpp" |
72 | #include "utilities/stack.inline.hpp" |
73 | #include "utilities/vmError.hpp" |
74 | #if INCLUDE_JVMCI |
75 | #include "jvmci/jvmci.hpp" |
76 | #endif |
77 | |
78 | GenCollectedHeap::GenCollectedHeap(Generation::Name young, |
79 | Generation::Name old, |
80 | const char* policy_counters_name) : |
81 | CollectedHeap(), |
82 | _young_gen_spec(new GenerationSpec(young, |
83 | NewSize, |
84 | MaxNewSize, |
85 | GenAlignment)), |
86 | _old_gen_spec(new GenerationSpec(old, |
87 | OldSize, |
88 | MaxOldSize, |
89 | GenAlignment)), |
90 | _rem_set(NULL), |
91 | _soft_ref_gen_policy(), |
92 | _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)), |
93 | _full_collections_completed(0), |
94 | _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)) { |
95 | } |
96 | |
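// Heap setup: reserve one contiguous region sized for both generations,
// create the card-table remembered set and barrier set, then carve the
// young generation out of the low end of the reservation and the old
// generation out of the remainder.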
97 | jint GenCollectedHeap::initialize() { |
98 | // While there are no constraints in the GC code that HeapWordSize |
99 | // be any particular value, there are multiple other areas in the |
100 | // system which believe this to be true (e.g. oop->object_size in some |
101 | // cases incorrectly returns the size in wordSize units rather than |
102 | // HeapWordSize). |
103 | guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); |
104 | |
105 | // Allocate space for the heap. |
106 | |
107 | char* heap_address; |
108 | ReservedSpace heap_rs; |
109 | |
110 | heap_address = allocate(HeapAlignment, &heap_rs); |
111 | |
112 | if (!heap_rs.is_reserved()) { |
113 | vm_shutdown_during_initialization( |
114 | "Could not reserve enough space for object heap" ); |
115 | return JNI_ENOMEM; |
116 | } |
117 | |
118 | initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); |
119 | |
120 | _rem_set = create_rem_set(reserved_region()); |
121 | _rem_set->initialize(); |
122 | CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set); |
123 | bs->initialize(); |
124 | BarrierSet::set_barrier_set(bs); |
125 | |
126 | ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false); |
127 | _young_gen = _young_gen_spec->init(young_rs, rem_set()); |
128 | heap_rs = heap_rs.last_part(_young_gen_spec->max_size()); |
129 | |
130 | ReservedSpace old_rs = heap_rs.first_part(_old_gen_spec->max_size(), false, false); |
131 | _old_gen = _old_gen_spec->init(old_rs, rem_set()); |
132 | clear_incremental_collection_failed(); |
133 | |
134 | return JNI_OK; |
135 | } |
136 | |
137 | CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) { |
138 | return new CardTableRS(reserved_region, false /* scan_concurrently */); |
139 | } |
140 | |
141 | void GenCollectedHeap::initialize_size_policy(size_t init_eden_size, |
142 | size_t init_promo_size, |
143 | size_t init_survivor_size) { |
144 | const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; |
145 | _size_policy = new AdaptiveSizePolicy(init_eden_size, |
146 | init_promo_size, |
147 | init_survivor_size, |
148 | max_gc_pause_sec, |
149 | GCTimeRatio); |
150 | } |
151 | |
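// Reserve the address range for the whole heap: the sum of both generations'
// maximum sizes, aligned and checked for overflow. initialize() later splits
// this single reservation between the young and old generations.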
152 | char* GenCollectedHeap::allocate(size_t alignment, |
153 | ReservedSpace* heap_rs){ |
154 | // Now figure out the total size. |
155 | const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size(); |
156 | assert(alignment % pageSize == 0, "Must be"); |
157 | |
158 | // Check for overflow. |
159 | size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size(); |
160 | if (total_reserved < _young_gen_spec->max_size()) { |
161 | vm_exit_during_initialization("The size of the object heap + VM data exceeds " |
162 | "the maximum representable size" ); |
163 | } |
164 | assert(total_reserved % alignment == 0, |
165 | "Gen size; total_reserved=" SIZE_FORMAT ", alignment=" |
166 | SIZE_FORMAT, total_reserved, alignment); |
167 | |
168 | *heap_rs = Universe::reserve_heap(total_reserved, alignment); |
169 | |
170 | os::trace_page_sizes("Heap" , |
171 | MinHeapSize, |
172 | total_reserved, |
173 | alignment, |
174 | heap_rs->base(), |
175 | heap_rs->size()); |
176 | |
177 | return heap_rs->base(); |
178 | } |
179 | |
180 | class GenIsScavengable : public BoolObjectClosure { |
181 | public: |
182 | bool do_object_b(oop obj) { |
183 | return GenCollectedHeap::heap()->is_in_young(obj); |
184 | } |
185 | }; |
186 | |
187 | static GenIsScavengable _is_scavengable; |
188 | |
189 | void GenCollectedHeap::post_initialize() { |
190 | CollectedHeap::post_initialize(); |
191 | ref_processing_init(); |
192 | |
193 | DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen; |
194 | |
195 | initialize_size_policy(def_new_gen->eden()->capacity(), |
196 | _old_gen->capacity(), |
197 | def_new_gen->from()->capacity()); |
198 | |
199 | MarkSweep::initialize(); |
200 | |
201 | ScavengableNMethods::initialize(&_is_scavengable); |
202 | } |
203 | |
204 | void GenCollectedHeap::ref_processing_init() { |
205 | _young_gen->ref_processor_init(); |
206 | _old_gen->ref_processor_init(); |
207 | } |
208 | |
209 | GenerationSpec* GenCollectedHeap::young_gen_spec() const { |
210 | return _young_gen_spec; |
211 | } |
212 | |
213 | GenerationSpec* GenCollectedHeap::old_gen_spec() const { |
214 | return _old_gen_spec; |
215 | } |
216 | |
217 | size_t GenCollectedHeap::capacity() const { |
218 | return _young_gen->capacity() + _old_gen->capacity(); |
219 | } |
220 | |
221 | size_t GenCollectedHeap::used() const { |
222 | return _young_gen->used() + _old_gen->used(); |
223 | } |
224 | |
225 | void GenCollectedHeap::save_used_regions() { |
226 | _old_gen->save_used_region(); |
227 | _young_gen->save_used_region(); |
228 | } |
229 | |
230 | size_t GenCollectedHeap::max_capacity() const { |
231 | return _young_gen->max_capacity() + _old_gen->max_capacity(); |
232 | } |
233 | |
234 | // Update the _full_collections_completed counter |
235 | // at the end of a stop-world full GC. |
236 | unsigned int GenCollectedHeap::update_full_collections_completed() { |
237 | MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
238 | assert(_full_collections_completed <= _total_full_collections, |
239 | "Can't complete more collections than were started" ); |
240 | _full_collections_completed = _total_full_collections; |
241 | ml.notify_all(); |
242 | return _full_collections_completed; |
243 | } |
244 | |
245 | // Update the _full_collections_completed counter, as appropriate, |
246 | // at the end of a concurrent GC cycle. Note the conditional update |
247 | // below to allow this method to be called by a concurrent collector |
248 | // without synchronizing in any manner with the VM thread (which |
249 | // may already have initiated a STW full collection "concurrently"). |
250 | unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) { |
251 | MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); |
252 | assert((_full_collections_completed <= _total_full_collections) && |
253 | (count <= _total_full_collections), |
254 | "Can't complete more collections than were started" ); |
255 | if (count > _full_collections_completed) { |
256 | _full_collections_completed = count; |
257 | ml.notify_all(); |
258 | } |
259 | return _full_collections_completed; |
260 | } |
261 | |
262 | // Return true if any of the following is true: |
263 | // . the allocation won't fit into the current young gen heap |
264 | // . gc locker is occupied (jni critical section) |
265 | // . heap memory is tight -- the most recent previous collection |
266 | // was a full collection because a partial collection (would |
267 | // have) failed and is likely to fail again |
268 | bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const { |
269 | size_t young_capacity = _young_gen->capacity_before_gc(); |
270 | return (word_size > heap_word_size(young_capacity)) |
271 | || GCLocker::is_active_and_needs_gc() |
272 | || incremental_collection_failed(); |
273 | } |
274 | |
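// Last-ditch allocation: try expanding the old generation and allocating
// there, and if that fails, try the same with the young generation.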
275 | HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) { |
276 | HeapWord* result = NULL; |
277 | if (_old_gen->should_allocate(size, is_tlab)) { |
278 | result = _old_gen->expand_and_allocate(size, is_tlab); |
279 | } |
280 | if (result == NULL) { |
281 | if (_young_gen->should_allocate(size, is_tlab)) { |
282 | result = _young_gen->expand_and_allocate(size, is_tlab); |
283 | } |
284 | } |
285 | assert(result == NULL || is_in_reserved(result), "result not in heap"); |
286 | return result; |
287 | } |
288 | |
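// Slow-path allocation loop: first a lock-free attempt in the young
// generation, then a locked attempt in the eligible generations, and finally
// a collection scheduled through VM_GenCollectForAllocation. Also handles
// stalls caused by an active GCLocker and the GC overhead limit.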
289 | HeapWord* GenCollectedHeap::mem_allocate_work(size_t size, |
290 | bool is_tlab, |
291 | bool* gc_overhead_limit_was_exceeded) { |
292 | // In general gc_overhead_limit_was_exceeded should be false, so |
293 | // set it to false here and reset it to true only if the gc time |
294 | // limit is exceeded, as checked below. |
295 | *gc_overhead_limit_was_exceeded = false; |
296 | |
297 | HeapWord* result = NULL; |
298 | |
299 | // Loop until the allocation is satisfied, or unsatisfied after GC. |
300 | for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { |
301 | HandleMark hm; // Discard any handles allocated in each iteration. |
302 | |
303 | // First allocation attempt is lock-free. |
304 | Generation *young = _young_gen; |
305 | assert(young->supports_inline_contig_alloc(), |
306 | "Otherwise, must do alloc within heap lock" ); |
307 | if (young->should_allocate(size, is_tlab)) { |
308 | result = young->par_allocate(size, is_tlab); |
309 | if (result != NULL) { |
310 | assert(is_in_reserved(result), "result not in heap"); |
311 | return result; |
312 | } |
313 | } |
314 | uint gc_count_before; // Read inside the Heap_lock locked region. |
315 | { |
316 | MutexLocker ml(Heap_lock); |
317 | log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation"); |
318 | // Note that only large objects get a shot at being |
319 | // allocated in later generations. |
320 | bool first_only = !should_try_older_generation_allocation(size); |
321 | |
322 | result = attempt_allocation(size, is_tlab, first_only); |
323 | if (result != NULL) { |
324 | assert(is_in_reserved(result), "result not in heap"); |
325 | return result; |
326 | } |
327 | |
328 | if (GCLocker::is_active_and_needs_gc()) { |
329 | if (is_tlab) { |
330 | return NULL; // Caller will retry allocating individual object. |
331 | } |
332 | if (!is_maximal_no_gc()) { |
333 | // Try and expand heap to satisfy request. |
334 | result = expand_heap_and_allocate(size, is_tlab); |
335 | // Result could be null if we are out of space. |
336 | if (result != NULL) { |
337 | return result; |
338 | } |
339 | } |
340 | |
341 | if (gclocker_stalled_count > GCLockerRetryAllocationCount) { |
342 | return NULL; // We didn't get to do a GC and we didn't get any memory. |
343 | } |
344 | |
345 | // If this thread is not in a jni critical section, we stall |
346 | // the requestor until the critical section has cleared and |
347 | // GC is allowed. When the critical section clears, a GC is |
348 | // initiated by the last thread exiting the critical section; so |
349 | // we retry the allocation sequence from the beginning of the loop, |
350 | // rather than causing more, now probably unnecessary, GC attempts. |
351 | JavaThread* jthr = JavaThread::current(); |
352 | if (!jthr->in_critical()) { |
353 | MutexUnlocker mul(Heap_lock); |
354 | // Wait for JNI critical section to be exited |
355 | GCLocker::stall_until_clear(); |
356 | gclocker_stalled_count += 1; |
357 | continue; |
358 | } else { |
359 | if (CheckJNICalls) { |
360 | fatal("Possible deadlock due to allocating while" |
361 | " in jni critical section" ); |
362 | } |
363 | return NULL; |
364 | } |
365 | } |
366 | |
367 | // Read the gc count while the heap lock is held. |
368 | gc_count_before = total_collections(); |
369 | } |
370 | |
371 | VM_GenCollectForAllocation op(size, is_tlab, gc_count_before); |
372 | VMThread::execute(&op); |
373 | if (op.prologue_succeeded()) { |
374 | result = op.result(); |
375 | if (op.gc_locked()) { |
376 | assert(result == NULL, "must be NULL if gc_locked() is true" ); |
377 | continue; // Retry and/or stall as necessary. |
378 | } |
379 | |
380 | // Allocation has failed and a collection |
381 | // has been done. If the gc time limit was exceeded |
382 | // this time, return NULL so that an out-of-memory error |
383 | // will be thrown. Clear gc_overhead_limit_exceeded |
384 | // so that the overhead-exceeded state does not persist. |
385 | |
386 | const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); |
387 | const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear(); |
388 | |
389 | if (limit_exceeded && softrefs_clear) { |
390 | *gc_overhead_limit_was_exceeded = true; |
391 | size_policy()->set_gc_overhead_limit_exceeded(false); |
392 | if (op.result() != NULL) { |
393 | CollectedHeap::fill_with_object(op.result(), size); |
394 | } |
395 | return NULL; |
396 | } |
397 | assert(result == NULL || is_in_reserved(result), |
398 | "result not in heap" ); |
399 | return result; |
400 | } |
401 | |
402 | // Give a warning if we seem to be looping forever. |
403 | if ((QueuedAllocationWarningCount > 0) && |
404 | (try_count % QueuedAllocationWarningCount == 0)) { |
405 | log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times," |
406 | " size=" SIZE_FORMAT " %s" , try_count, size, is_tlab ? "(TLAB)" : "" ); |
407 | } |
408 | } |
409 | } |
410 | |
411 | #ifndef PRODUCT |
412 | // Override of memory state checking method in CollectedHeap: |
413 | // Some collectors (CMS for example) can't have badHeapWordVal written |
414 | // in the first two words of an object. (For instance, in the case of |
415 | // CMS these words hold state used to synchronize between certain |
416 | // (concurrent) GC steps and direct allocating mutators.) |
417 | // The skip_header_HeapWords() method below allows us to skip |
418 | // over the requisite number of HeapWords. Note that (for |
419 | // generational collectors) this means that those many words are |
420 | // skipped in each object, irrespective of the generation in which |
421 | // that object lives. The resultant loss of precision seems to be |
422 | // harmless and the pain of avoiding that imprecision appears somewhat |
423 | // higher than we are prepared to pay for such rudimentary debugging |
424 | // support. |
425 | void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, |
426 | size_t size) { |
427 | if (CheckMemoryInitialization && ZapUnusedHeapArea) { |
428 | // We are asked to check a size in HeapWords, |
429 | // but the memory is mangled in juint words. |
430 | juint* start = (juint*) (addr + skip_header_HeapWords()); |
431 | juint* end = (juint*) (addr + size); |
432 | for (juint* slot = start; slot < end; slot += 1) { |
433 | assert(*slot == badHeapWordVal, |
434 | "Found non badHeapWordValue in pre-allocation check" ); |
435 | } |
436 | } |
437 | } |
438 | #endif |
439 | |
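// Try the young generation first; unless first_only is set, fall back to
// the old generation.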
440 | HeapWord* GenCollectedHeap::attempt_allocation(size_t size, |
441 | bool is_tlab, |
442 | bool first_only) { |
443 | HeapWord* res = NULL; |
444 | |
445 | if (_young_gen->should_allocate(size, is_tlab)) { |
446 | res = _young_gen->allocate(size, is_tlab); |
447 | if (res != NULL || first_only) { |
448 | return res; |
449 | } |
450 | } |
451 | |
452 | if (_old_gen->should_allocate(size, is_tlab)) { |
453 | res = _old_gen->allocate(size, is_tlab); |
454 | } |
455 | |
456 | return res; |
457 | } |
458 | |
459 | HeapWord* GenCollectedHeap::mem_allocate(size_t size, |
460 | bool* gc_overhead_limit_was_exceeded) { |
461 | return mem_allocate_work(size, |
462 | false /* is_tlab */, |
463 | gc_overhead_limit_was_exceeded); |
464 | } |
465 | |
466 | bool GenCollectedHeap::must_clear_all_soft_refs() { |
467 | return _gc_cause == GCCause::_metadata_GC_clear_soft_refs || |
468 | _gc_cause == GCCause::_wb_full_gc; |
469 | } |
470 | |
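// Collect a single generation: update its counters and timers, optionally
// verify before and after, set up reference discovery, and delegate the
// actual work to gen->collect().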
471 | void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size, |
472 | bool is_tlab, bool run_verification, bool clear_soft_refs, |
473 | bool restore_marks_for_biased_locking) { |
474 | FormatBuffer<> title("Collect gen: %s", gen->short_name()); |
475 | GCTraceTime(Trace, gc, phases) t1(title); |
476 | TraceCollectorStats tcs(gen->counters()); |
477 | TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause()); |
478 | |
479 | gen->stat_record()->invocations++; |
480 | gen->stat_record()->accumulated_time.start(); |
481 | |
482 | // Must be done anew before each collection because |
483 | // a previous collection will do mangling and will |
484 | // change top of some spaces. |
485 | record_gen_tops_before_GC(); |
486 | |
487 | log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old" , gen->stat_record()->invocations, size * HeapWordSize); |
488 | |
489 | if (run_verification && VerifyBeforeGC) { |
490 | HandleMark hm; // Discard invalid handles created during verification |
491 | Universe::verify("Before GC" ); |
492 | } |
493 | COMPILER2_PRESENT(DerivedPointerTable::clear()); |
494 | |
495 | if (restore_marks_for_biased_locking) { |
496 | // We perform this mark word preservation work lazily |
497 | // because it's only at this point that we know whether we |
498 | // absolutely have to do it; we want to avoid doing it for |
499 | // scavenge-only collections where it's unnecessary |
500 | BiasedLocking::preserve_marks(); |
501 | } |
502 | |
503 | // Do collection work |
504 | { |
505 | // Note on ref discovery: For what appear to be historical reasons, |
506 | // GCH enables and disables (by enqueueing) refs discovery. |
507 | // In the future this should be moved into the generation's |
508 | // collect method so that ref discovery and enqueueing concerns |
509 | // are local to a generation. The collect method could return |
510 | // an appropriate indication in the case that notification on |
511 | // the ref lock was needed. This will make the treatment of |
512 | // weak refs more uniform (and indeed remove such concerns |
513 | // from GCH). XXX |
514 | |
515 | HandleMark hm; // Discard invalid handles created during gc |
516 | save_marks(); // save marks for all gens |
517 | // We want to discover references, but not process them yet. |
518 | // This mode is disabled in process_discovered_references if the |
519 | // generation does some collection work, or in |
520 | // enqueue_discovered_references if the generation returns |
521 | // without doing any work. |
522 | ReferenceProcessor* rp = gen->ref_processor(); |
523 | // If the discovery of ("weak") refs in this generation is |
524 | // atomic wrt other collectors in this configuration, we |
525 | // are guaranteed to have empty discovered ref lists. |
526 | if (rp->discovery_is_atomic()) { |
527 | rp->enable_discovery(); |
528 | rp->setup_policy(clear_soft_refs); |
529 | } else { |
530 | // collect() below will enable discovery as appropriate |
531 | } |
532 | gen->collect(full, clear_soft_refs, size, is_tlab); |
533 | if (!rp->enqueuing_is_done()) { |
534 | rp->disable_discovery(); |
535 | } else { |
536 | rp->set_enqueuing_is_done(false); |
537 | } |
538 | rp->verify_no_references_recorded(); |
539 | } |
540 | |
541 | COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); |
542 | |
543 | gen->stat_record()->accumulated_time.stop(); |
544 | |
545 | update_gc_stats(gen, full); |
546 | |
547 | if (run_verification && VerifyAfterGC) { |
548 | HandleMark hm; // Discard invalid handles created during verification |
549 | Universe::verify("After GC" ); |
550 | } |
551 | } |
552 | |
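// Top-level collection driver, run at a safepoint: possibly do a young
// collection first, then consult should_do_full_collection() to decide
// whether an old (full) collection is needed as well.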
553 | void GenCollectedHeap::do_collection(bool full, |
554 | bool clear_all_soft_refs, |
555 | size_t size, |
556 | bool is_tlab, |
557 | GenerationType max_generation) { |
558 | ResourceMark rm; |
559 | DEBUG_ONLY(Thread* my_thread = Thread::current();) |
560 | |
561 | assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); |
562 | assert(my_thread->is_VM_thread() || |
563 | my_thread->is_ConcurrentGC_thread(), |
564 | "incorrect thread type capability"); |
565 | assert(Heap_lock->is_locked(), |
566 | "the requesting thread should have the Heap_lock"); |
567 | guarantee(!is_gc_active(), "collection is not reentrant"); |
568 | |
569 | if (GCLocker::check_active_before_gc()) { |
570 | return; // GC is disabled (e.g. JNI GetXXXCritical operation) |
571 | } |
572 | |
573 | const bool do_clear_all_soft_refs = clear_all_soft_refs || |
574 | soft_ref_policy()->should_clear_all_soft_refs(); |
575 | |
576 | ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy()); |
577 | |
578 | const size_t metadata_prev_used = MetaspaceUtils::used_bytes(); |
579 | |
580 | |
581 | FlagSetting fl(_is_gc_active, true); |
582 | |
583 | bool complete = full && (max_generation == OldGen); |
584 | bool old_collects_young = complete && !ScavengeBeforeFullGC; |
585 | bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab); |
586 | |
587 | size_t young_prev_used = _young_gen->used(); |
588 | size_t old_prev_used = _old_gen->used(); |
589 | |
590 | bool run_verification = total_collections() >= VerifyGCStartAt; |
591 | bool prepared_for_verification = false; |
592 | bool do_full_collection = false; |
593 | |
594 | if (do_young_collection) { |
595 | GCIdMark gc_id_mark; |
596 | GCTraceCPUTime tcpu; |
597 | GCTraceTime(Info, gc) t("Pause Young" , NULL, gc_cause(), true); |
598 | |
599 | print_heap_before_gc(); |
600 | |
601 | if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) { |
602 | prepare_for_verify(); |
603 | prepared_for_verification = true; |
604 | } |
605 | |
606 | gc_prologue(complete); |
607 | increment_total_collections(complete); |
608 | |
609 | collect_generation(_young_gen, |
610 | full, |
611 | size, |
612 | is_tlab, |
613 | run_verification && VerifyGCLevel <= 0, |
614 | do_clear_all_soft_refs, |
615 | false); |
616 | |
617 | if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) && |
618 | size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) { |
619 | // Allocation request was met by young GC. |
620 | size = 0; |
621 | } |
622 | |
623 | // Ask if young collection is enough. If so, do the final steps for young collection, |
624 | // and fallthrough to the end. |
625 | do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation); |
626 | if (!do_full_collection) { |
627 | // Adjust generation sizes. |
628 | _young_gen->compute_new_size(); |
629 | |
630 | print_heap_change(young_prev_used, old_prev_used); |
631 | MetaspaceUtils::print_metaspace_change(metadata_prev_used); |
632 | |
633 | // Track memory usage and detect low memory after GC finishes |
634 | MemoryService::track_memory_usage(); |
635 | |
636 | gc_epilogue(complete); |
637 | } |
638 | |
639 | print_heap_after_gc(); |
640 | |
641 | } else { |
642 | // No young collection, ask if we need to perform Full collection. |
643 | do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation); |
644 | } |
645 | |
646 | if (do_full_collection) { |
647 | GCIdMark gc_id_mark; |
648 | GCTraceCPUTime tcpu; |
649 | GCTraceTime(Info, gc) t("Pause Full" , NULL, gc_cause(), true); |
650 | |
651 | print_heap_before_gc(); |
652 | |
653 | if (!prepared_for_verification && run_verification && |
654 | VerifyGCLevel <= 1 && VerifyBeforeGC) { |
655 | prepare_for_verify(); |
656 | } |
657 | |
658 | if (!do_young_collection) { |
659 | gc_prologue(complete); |
660 | increment_total_collections(complete); |
661 | } |
662 | |
663 | // Accounting quirk: total full collections would be incremented when "complete" |
664 | // is set, by calling increment_total_collections above. However, we also need to |
665 | // account for Full collections that had "complete" unset. |
666 | if (!complete) { |
667 | increment_total_full_collections(); |
668 | } |
669 | |
670 | collect_generation(_old_gen, |
671 | full, |
672 | size, |
673 | is_tlab, |
674 | run_verification && VerifyGCLevel <= 1, |
675 | do_clear_all_soft_refs, |
676 | true); |
677 | |
678 | // Adjust generation sizes. |
679 | _old_gen->compute_new_size(); |
680 | _young_gen->compute_new_size(); |
681 | |
682 | // Delete metaspaces for unloaded class loaders and clean up loader_data graph |
683 | ClassLoaderDataGraph::purge(); |
684 | MetaspaceUtils::verify_metrics(); |
685 | // Resize the metaspace capacity after full collections |
686 | MetaspaceGC::compute_new_size(); |
687 | update_full_collections_completed(); |
688 | |
689 | print_heap_change(young_prev_used, old_prev_used); |
690 | MetaspaceUtils::print_metaspace_change(metadata_prev_used); |
691 | |
692 | // Track memory usage and detect low memory after GC finishes |
693 | MemoryService::track_memory_usage(); |
694 | |
695 | // Need to tell the epilogue code we are done with Full GC, regardless what was |
696 | // the initial value for "complete" flag. |
697 | gc_epilogue(true); |
698 | |
699 | BiasedLocking::restore_marks(); |
700 | |
701 | print_heap_after_gc(); |
702 | } |
703 | |
704 | #ifdef TRACESPINNING |
705 | ParallelTaskTerminator::print_termination_counts(); |
706 | #endif |
707 | } |
708 | |
709 | bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab, |
710 | GenCollectedHeap::GenerationType max_gen) const { |
711 | return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab); |
712 | } |
713 | |
714 | void GenCollectedHeap::register_nmethod(nmethod* nm) { |
715 | ScavengableNMethods::register_nmethod(nm); |
716 | } |
717 | |
718 | void GenCollectedHeap::unregister_nmethod(nmethod* nm) { |
719 | ScavengableNMethods::unregister_nmethod(nm); |
720 | } |
721 | |
722 | void GenCollectedHeap::verify_nmethod(nmethod* nm) { |
723 | ScavengableNMethods::verify_nmethod(nm); |
724 | } |
725 | |
726 | void GenCollectedHeap::flush_nmethod(nmethod* nm) { |
727 | // Do nothing. |
728 | } |
729 | |
730 | void GenCollectedHeap::prune_scavengable_nmethods() { |
731 | ScavengableNMethods::prune_nmethods(); |
732 | } |
733 | |
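// Called at a safepoint after an allocation has failed: run an incremental
// or full collection as appropriate, retry the allocation, try expanding the
// heap, and as a last resort run a maximally compacting collection that
// clears all soft references.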
734 | HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { |
735 | GCCauseSetter x(this, GCCause::_allocation_failure); |
736 | HeapWord* result = NULL; |
737 | |
738 | assert(size != 0, "Precondition violated"); |
739 | if (GCLocker::is_active_and_needs_gc()) { |
740 | // GC locker is active; instead of a collection we will attempt |
741 | // to expand the heap, if there's room for expansion. |
742 | if (!is_maximal_no_gc()) { |
743 | result = expand_heap_and_allocate(size, is_tlab); |
744 | } |
745 | return result; // Could be null if we are out of space. |
746 | } else if (!incremental_collection_will_fail(false /* don't consult_young */)) { |
747 | // Do an incremental collection. |
748 | do_collection(false, // full |
749 | false, // clear_all_soft_refs |
750 | size, // size |
751 | is_tlab, // is_tlab |
752 | GenCollectedHeap::OldGen); // max_generation |
753 | } else { |
754 | log_trace(gc)(" :: Trying full because partial may fail :: " ); |
755 | // Try a full collection; see delta for bug id 6266275 |
756 | // for the original code and why this has been simplified |
757 | // with from-space allocation criteria modified and |
758 | // such allocation moved out of the safepoint path. |
759 | do_collection(true, // full |
760 | false, // clear_all_soft_refs |
761 | size, // size |
762 | is_tlab, // is_tlab |
763 | GenCollectedHeap::OldGen); // max_generation |
764 | } |
765 | |
766 | result = attempt_allocation(size, is_tlab, false /*first_only*/); |
767 | |
768 | if (result != NULL) { |
769 | assert(is_in_reserved(result), "result not in heap"); |
770 | return result; |
771 | } |
772 | |
773 | // OK, collection failed, try expansion. |
774 | result = expand_heap_and_allocate(size, is_tlab); |
775 | if (result != NULL) { |
776 | return result; |
777 | } |
778 | |
779 | // If we reach this point, we're really out of memory. Try every trick |
780 | // we can to reclaim memory. Force collection of soft references. Force |
781 | // a complete compaction of the heap. Any additional methods for finding |
782 | // free memory should be here, especially if they are expensive. If this |
783 | // attempt fails, an OOM exception will be thrown. |
784 | { |
785 | UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted |
786 | |
787 | do_collection(true, // full |
788 | true, // clear_all_soft_refs |
789 | size, // size |
790 | is_tlab, // is_tlab |
791 | GenCollectedHeap::OldGen); // max_generation |
792 | } |
793 | |
794 | result = attempt_allocation(size, is_tlab, false /* first_only */); |
795 | if (result != NULL) { |
796 | assert(is_in_reserved(result), "result not in heap"); |
797 | return result; |
798 | } |
799 | |
800 | assert(!soft_ref_policy()->should_clear_all_soft_refs(), |
801 | "Flag should have been handled and cleared prior to this point" ); |
802 | |
803 | // What else? We might try synchronous finalization later. If the total |
804 | // space available is large enough for the allocation, then a more |
805 | // complete compaction phase than we've tried so far might be |
806 | // appropriate. |
807 | return NULL; |
808 | } |
809 | |
810 | #ifdef ASSERT |
811 | class AssertNonScavengableClosure: public OopClosure { |
812 | public: |
813 | virtual void do_oop(oop* p) { |
814 | assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p), |
815 | "Referent should not be scavengable." ); } |
816 | virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
817 | }; |
818 | static AssertNonScavengableClosure assert_is_non_scavengable_closure; |
819 | #endif |
820 | |
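// Apply the given closures to all strong root groups. Each group is guarded
// by a SubTasksDone task so that, when run by several GC workers, it is
// processed exactly once.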
821 | void GenCollectedHeap::process_roots(StrongRootsScope* scope, |
822 | ScanningOption so, |
823 | OopClosure* strong_roots, |
824 | CLDClosure* strong_cld_closure, |
825 | CLDClosure* weak_cld_closure, |
826 | CodeBlobToOopClosure* code_roots) { |
827 | // General roots. |
828 | assert(code_roots != NULL, "code root closure should always be set"); |
829 | // _n_termination for _process_strong_tasks should be set upstream, |
830 | // in a method not running in a GC worker. Otherwise the GC worker |
831 | // could be trying to change the termination condition while the task |
832 | // is executing in another GC worker. |
833 | |
834 | if (_process_strong_tasks->try_claim_task(GCH_PS_ClassLoaderDataGraph_oops_do)) { |
835 | ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure); |
836 | } |
837 | |
838 | // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway |
839 | CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots; |
840 | |
841 | bool is_par = scope->n_threads() > 1; |
842 | Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p); |
843 | |
844 | if (_process_strong_tasks->try_claim_task(GCH_PS_Universe_oops_do)) { |
845 | Universe::oops_do(strong_roots); |
846 | } |
847 | // Global (strong) JNI handles |
848 | if (_process_strong_tasks->try_claim_task(GCH_PS_JNIHandles_oops_do)) { |
849 | JNIHandles::oops_do(strong_roots); |
850 | } |
851 | |
852 | if (_process_strong_tasks->try_claim_task(GCH_PS_ObjectSynchronizer_oops_do)) { |
853 | ObjectSynchronizer::oops_do(strong_roots); |
854 | } |
855 | if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) { |
856 | Management::oops_do(strong_roots); |
857 | } |
858 | if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) { |
859 | JvmtiExport::oops_do(strong_roots); |
860 | } |
861 | #if INCLUDE_AOT |
862 | if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) { |
863 | AOTLoader::oops_do(strong_roots); |
864 | } |
865 | #endif |
866 | #if INCLUDE_JVMCI |
867 | if (EnableJVMCI && _process_strong_tasks->try_claim_task(GCH_PS_jvmci_oops_do)) { |
868 | JVMCI::oops_do(strong_roots); |
869 | } |
870 | #endif |
871 | if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) { |
872 | SystemDictionary::oops_do(strong_roots); |
873 | } |
874 | |
875 | if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) { |
876 | if (so & SO_ScavengeCodeCache) { |
877 | assert(code_roots != NULL, "must supply closure for code cache"); |
878 | |
879 | // We only visit parts of the CodeCache when scavenging. |
880 | ScavengableNMethods::nmethods_do(code_roots); |
881 | } |
882 | if (so & SO_AllCodeCache) { |
883 | assert(code_roots != NULL, "must supply closure for code cache"); |
884 | |
885 | // CMSCollector uses this to do intermediate-strength collections. |
886 | // We scan the entire code cache, since CodeCache::do_unloading is not called. |
887 | CodeCache::blobs_do(code_roots); |
888 | } |
889 | // Verify that the code cache contents are not subject to |
890 | // movement by a scavenging collection. |
891 | DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations)); |
892 | DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); |
893 | } |
894 | } |
895 | |
896 | void GenCollectedHeap::young_process_roots(StrongRootsScope* scope, |
897 | OopsInGenClosure* root_closure, |
898 | OopsInGenClosure* old_gen_closure, |
899 | CLDClosure* cld_closure) { |
900 | MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations); |
901 | |
902 | process_roots(scope, SO_ScavengeCodeCache, root_closure, |
903 | cld_closure, cld_closure, &mark_code_closure); |
904 | |
905 | if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) { |
906 | root_closure->reset_generation(); |
907 | } |
908 | |
909 | // When collection is parallel, all threads get to cooperate to do |
910 | // old generation scanning. |
911 | old_gen_closure->set_generation(_old_gen); |
912 | rem_set()->younger_refs_iterate(_old_gen, old_gen_closure, scope->n_threads()); |
913 | old_gen_closure->reset_generation(); |
914 | |
915 | _process_strong_tasks->all_tasks_completed(scope->n_threads()); |
916 | } |
917 | |
918 | void GenCollectedHeap::full_process_roots(StrongRootsScope* scope, |
919 | bool is_adjust_phase, |
920 | ScanningOption so, |
921 | bool only_strong_roots, |
922 | OopsInGenClosure* root_closure, |
923 | CLDClosure* cld_closure) { |
924 | MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase); |
925 | CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure; |
926 | |
927 | process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure); |
928 | _process_strong_tasks->all_tasks_completed(scope->n_threads()); |
929 | } |
930 | |
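// Apply root_closure to the weak roots: the shared weak oop storages and the
// oops held by both generations' reference processors.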
931 | void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { |
932 | WeakProcessor::oops_do(root_closure); |
933 | _young_gen->ref_processor()->weak_oops_do(root_closure); |
934 | _old_gen->ref_processor()->weak_oops_do(root_closure); |
935 | } |
936 | |
937 | bool GenCollectedHeap::no_allocs_since_save_marks() { |
938 | return _young_gen->no_allocs_since_save_marks() && |
939 | _old_gen->no_allocs_since_save_marks(); |
940 | } |
941 | |
942 | bool GenCollectedHeap::supports_inline_contig_alloc() const { |
943 | return _young_gen->supports_inline_contig_alloc(); |
944 | } |
945 | |
946 | HeapWord* volatile* GenCollectedHeap::top_addr() const { |
947 | return _young_gen->top_addr(); |
948 | } |
949 | |
950 | HeapWord** GenCollectedHeap::end_addr() const { |
951 | return _young_gen->end_addr(); |
952 | } |
953 | |
954 | // public collection interfaces |
955 | |
956 | void GenCollectedHeap::collect(GCCause::Cause cause) { |
957 | if (cause == GCCause::_wb_young_gc) { |
958 | // Young collection for the WhiteBox API. |
959 | collect(cause, YoungGen); |
960 | } else { |
961 | #ifdef ASSERT |
962 | if (cause == GCCause::_scavenge_alot) { |
963 | // Young collection only. |
964 | collect(cause, YoungGen); |
965 | } else { |
966 | // Stop-the-world full collection. |
967 | collect(cause, OldGen); |
968 | } |
969 | #else |
970 | // Stop-the-world full collection. |
971 | collect(cause, OldGen); |
972 | #endif |
973 | } |
974 | } |
975 | |
976 | void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) { |
977 | // The caller doesn't have the Heap_lock |
978 | assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); |
979 | MutexLocker ml(Heap_lock); |
980 | collect_locked(cause, max_generation); |
981 | } |
982 | |
983 | void GenCollectedHeap::collect_locked(GCCause::Cause cause) { |
984 | // The caller has the Heap_lock |
985 | assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); |
986 | collect_locked(cause, OldGen); |
987 | } |
988 | |
989 | // this is the private collection interface |
990 | // The Heap_lock is expected to be held on entry. |
991 | |
992 | void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) { |
993 | // Read the GC count while holding the Heap_lock |
994 | unsigned int gc_count_before = total_collections(); |
995 | unsigned int full_gc_count_before = total_full_collections(); |
996 | { |
997 | MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back |
998 | VM_GenCollectFull op(gc_count_before, full_gc_count_before, |
999 | cause, max_generation); |
1000 | VMThread::execute(&op); |
1001 | } |
1002 | } |
1003 | |
1004 | void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) { |
1005 | do_full_collection(clear_all_soft_refs, OldGen); |
1006 | } |
1007 | |
1008 | void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, |
1009 | GenerationType last_generation) { |
1010 | GenerationType local_last_generation; |
1011 | if (!incremental_collection_will_fail(false /* don't consult_young */) && |
1012 | gc_cause() == GCCause::_gc_locker) { |
1013 | local_last_generation = YoungGen; |
1014 | } else { |
1015 | local_last_generation = last_generation; |
1016 | } |
1017 | |
1018 | do_collection(true, // full |
1019 | clear_all_soft_refs, // clear_all_soft_refs |
1020 | 0, // size |
1021 | false, // is_tlab |
1022 | local_last_generation); // last_generation |
1023 | // Hack XXX FIX ME !!! |
1024 | // A scavenge may not have been attempted, or may have |
1025 | // been attempted and failed, because the old gen was too full |
1026 | if (local_last_generation == YoungGen && gc_cause() == GCCause::_gc_locker && |
1027 | incremental_collection_will_fail(false /* don't consult_young */)) { |
1028 | log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed"); |
1029 | // This time allow the old gen to be collected as well |
1030 | do_collection(true, // full |
1031 | clear_all_soft_refs, // clear_all_soft_refs |
1032 | 0, // size |
1033 | false, // is_tlab |
1034 | OldGen); // last_generation |
1035 | } |
1036 | } |
1037 | |
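// Relies on the heap layout set up in initialize(): the young generation is
// reserved below the old generation, so an address compare against the old
// generation's start suffices (the assert cross-checks this).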
1038 | bool GenCollectedHeap::is_in_young(oop p) { |
1039 | bool result = ((HeapWord*)p) < _old_gen->reserved().start(); |
1040 | assert(result == _young_gen->is_in_reserved(p), |
1041 | "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)); |
1042 | return result; |
1043 | } |
1044 | |
1045 | // Returns "TRUE" iff "p" points into the committed areas of the heap. |
1046 | bool GenCollectedHeap::is_in(const void* p) const { |
1047 | return _young_gen->is_in(p) || _old_gen->is_in(p); |
1048 | } |
1049 | |
1050 | #ifdef ASSERT |
1051 | // Don't implement this by using is_in_young(). This method is used |
1052 | // in some cases to check that is_in_young() is correct. |
1053 | bool GenCollectedHeap::is_in_partial_collection(const void* p) { |
1054 | assert(is_in_reserved(p) || p == NULL, |
1055 | "Does not work if address is non-null and outside of the heap" ); |
1056 | return p < _young_gen->reserved().end() && p != NULL; |
1057 | } |
1058 | #endif |
1059 | |
1060 | void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) { |
1061 | _young_gen->oop_iterate(cl); |
1062 | _old_gen->oop_iterate(cl); |
1063 | } |
1064 | |
1065 | void GenCollectedHeap::object_iterate(ObjectClosure* cl) { |
1066 | _young_gen->object_iterate(cl); |
1067 | _old_gen->object_iterate(cl); |
1068 | } |
1069 | |
1070 | void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) { |
1071 | _young_gen->safe_object_iterate(cl); |
1072 | _old_gen->safe_object_iterate(cl); |
1073 | } |
1074 | |
1075 | Space* GenCollectedHeap::space_containing(const void* addr) const { |
1076 | Space* res = _young_gen->space_containing(addr); |
1077 | if (res != NULL) { |
1078 | return res; |
1079 | } |
1080 | res = _old_gen->space_containing(addr); |
1081 | assert(res != NULL, "Could not find containing space"); |
1082 | return res; |
1083 | } |
1084 | |
1085 | HeapWord* GenCollectedHeap::block_start(const void* addr) const { |
1086 | assert(is_in_reserved(addr), "block_start of address outside of heap" ); |
1087 | if (_young_gen->is_in_reserved(addr)) { |
1088 | assert(_young_gen->is_in(addr), "addr should be in allocated part of generation" ); |
1089 | return _young_gen->block_start(addr); |
1090 | } |
1091 | |
1092 | assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address"); |
1093 | assert(_old_gen->is_in(addr), "addr should be in allocated part of generation"); |
1094 | return _old_gen->block_start(addr); |
1095 | } |
1096 | |
1097 | bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const { |
1098 | assert(is_in_reserved(addr), "block_is_obj of address outside of heap" ); |
1099 | assert(block_start(addr) == addr, "addr must be a block start" ); |
1100 | if (_young_gen->is_in_reserved(addr)) { |
1101 | return _young_gen->block_is_obj(addr); |
1102 | } |
1103 | |
1104 | assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address"); |
1105 | return _old_gen->block_is_obj(addr); |
1106 | } |
1107 | |
1108 | bool GenCollectedHeap::supports_tlab_allocation() const { |
1109 | assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); |
1110 | return _young_gen->supports_tlab_allocation(); |
1111 | } |
1112 | |
1113 | size_t GenCollectedHeap::tlab_capacity(Thread* thr) const { |
1114 | assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); |
1115 | if (_young_gen->supports_tlab_allocation()) { |
1116 | return _young_gen->tlab_capacity(); |
1117 | } |
1118 | return 0; |
1119 | } |
1120 | |
1121 | size_t GenCollectedHeap::tlab_used(Thread* thr) const { |
1122 | assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); |
1123 | if (_young_gen->supports_tlab_allocation()) { |
1124 | return _young_gen->tlab_used(); |
1125 | } |
1126 | return 0; |
1127 | } |
1128 | |
1129 | size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { |
1130 | assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); |
1131 | if (_young_gen->supports_tlab_allocation()) { |
1132 | return _young_gen->unsafe_max_tlab_alloc(); |
1133 | } |
1134 | return 0; |
1135 | } |
1136 | |
1137 | HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size, |
1138 | size_t requested_size, |
1139 | size_t* actual_size) { |
1140 | bool gc_overhead_limit_was_exceeded; |
1141 | HeapWord* result = mem_allocate_work(requested_size /* size */, |
1142 | true /* is_tlab */, |
1143 | &gc_overhead_limit_was_exceeded); |
1144 | if (result != NULL) { |
1145 | *actual_size = requested_size; |
1146 | } |
1147 | |
1148 | return result; |
1149 | } |
1150 | |
1151 | // Requires "*prev_ptr" to be non-NULL. Deletes and a block of minimal size |
1152 | // from the list headed by "*prev_ptr". |
1153 | static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) { |
1154 | bool first = true; |
1155 | size_t min_size = 0; // "first" makes this conceptually infinite. |
1156 | ScratchBlock **smallest_ptr, *smallest; |
1157 | ScratchBlock *cur = *prev_ptr; |
1158 | while (cur) { |
1159 | assert(*prev_ptr == cur, "just checking"); |
1160 | if (first || cur->num_words < min_size) { |
1161 | smallest_ptr = prev_ptr; |
1162 | smallest = cur; |
1163 | min_size = smallest->num_words; |
1164 | first = false; |
1165 | } |
1166 | prev_ptr = &cur->next; |
1167 | cur = cur->next; |
1168 | } |
1169 | smallest = *smallest_ptr; |
1170 | *smallest_ptr = smallest->next; |
1171 | return smallest; |
1172 | } |
1173 | |
1174 | // Sort the scratch block list headed by "list" into decreasing size order, |
1175 | // and set "list" to the result. |
1176 | static void sort_scratch_list(ScratchBlock*& list) { |
1177 | ScratchBlock* sorted = NULL; |
1178 | ScratchBlock* unsorted = list; |
1179 | while (unsorted) { |
1180 | ScratchBlock *smallest = removeSmallestScratch(&unsorted); |
1181 | smallest->next = sorted; |
1182 | sorted = smallest; |
1183 | } |
1184 | list = sorted; |
1185 | } |
1186 | |
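// Gather scratch space from both generations for use by the requestor during
// a collection; the resulting list is sorted into decreasing size order.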
1187 | ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor, |
1188 | size_t max_alloc_words) { |
1189 | ScratchBlock* res = NULL; |
1190 | _young_gen->contribute_scratch(res, requestor, max_alloc_words); |
1191 | _old_gen->contribute_scratch(res, requestor, max_alloc_words); |
1192 | sort_scratch_list(res); |
1193 | return res; |
1194 | } |
1195 | |
1196 | void GenCollectedHeap::release_scratch() { |
1197 | _young_gen->reset_scratch(); |
1198 | _old_gen->reset_scratch(); |
1199 | } |
1200 | |
1201 | class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure { |
1202 | void do_generation(Generation* gen) { |
1203 | gen->prepare_for_verify(); |
1204 | } |
1205 | }; |
1206 | |
1207 | void GenCollectedHeap::prepare_for_verify() { |
1208 | ensure_parsability(false); // no need to retire TLABs |
1209 | GenPrepareForVerifyClosure blk; |
1210 | generation_iterate(&blk, false); |
1211 | } |
1212 | |
1213 | void GenCollectedHeap::generation_iterate(GenClosure* cl, |
1214 | bool old_to_young) { |
1215 | if (old_to_young) { |
1216 | cl->do_generation(_old_gen); |
1217 | cl->do_generation(_young_gen); |
1218 | } else { |
1219 | cl->do_generation(_young_gen); |
1220 | cl->do_generation(_old_gen); |
1221 | } |
1222 | } |
1223 | |
1224 | bool GenCollectedHeap::is_maximal_no_gc() const { |
1225 | return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc(); |
1226 | } |
1227 | |
1228 | void GenCollectedHeap::save_marks() { |
1229 | _young_gen->save_marks(); |
1230 | _old_gen->save_marks(); |
1231 | } |
1232 | |
1233 | GenCollectedHeap* GenCollectedHeap::heap() { |
1234 | CollectedHeap* heap = Universe::heap(); |
1235 | assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()"); |
1236 | assert(heap->kind() == CollectedHeap::Serial || |
1237 | heap->kind() == CollectedHeap::CMS, "Invalid name"); |
1238 | return (GenCollectedHeap*) heap; |
1239 | } |
1240 | |
1241 | #if INCLUDE_SERIALGC |
1242 | void GenCollectedHeap::prepare_for_compaction() { |
1243 | // Start by compacting into same gen. |
1244 | CompactPoint cp(_old_gen); |
1245 | _old_gen->prepare_for_compaction(&cp); |
1246 | _young_gen->prepare_for_compaction(&cp); |
1247 | } |
1248 | #endif // INCLUDE_SERIALGC |
1249 | |
1250 | void GenCollectedHeap::verify(VerifyOption option /* ignored */) { |
1251 | log_debug(gc, verify)("%s" , _old_gen->name()); |
1252 | _old_gen->verify(); |
1253 | |
1254 | log_debug(gc, verify)("%s" , _old_gen->name()); |
1255 | _young_gen->verify(); |
1256 | |
1257 | log_debug(gc, verify)("RemSet" ); |
1258 | rem_set()->verify(); |
1259 | } |
1260 | |
1261 | void GenCollectedHeap::print_on(outputStream* st) const { |
1262 | _young_gen->print_on(st); |
1263 | _old_gen->print_on(st); |
1264 | MetaspaceUtils::print_on(st); |
1265 | } |
1266 | |
1267 | void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
1268 | } |
1269 | |
1270 | void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { |
1271 | } |
1272 | |
1273 | void GenCollectedHeap::print_tracing_info() const { |
1274 | if (log_is_enabled(Debug, gc, heap, exit)) { |
1275 | LogStreamHandle(Debug, gc, heap, exit) lsh; |
1276 | _young_gen->print_summary_info_on(&lsh); |
1277 | _old_gen->print_summary_info_on(&lsh); |
1278 | } |
1279 | } |
1280 | |
1281 | void GenCollectedHeap::print_heap_change(size_t young_prev_used, size_t old_prev_used) const { |
1282 | log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", |
1283 | _young_gen->short_name(), young_prev_used / K, _young_gen->used() / K, _young_gen->capacity() / K); |
1284 | log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", |
1285 | _old_gen->short_name(), old_prev_used / K, _old_gen->used() / K, _old_gen->capacity() / K); |
1286 | } |
1287 | |
1288 | class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { |
1289 | private: |
1290 | bool _full; |
1291 | public: |
1292 | void do_generation(Generation* gen) { |
1293 | gen->gc_prologue(_full); |
1294 | } |
1295 | GenGCPrologueClosure(bool full) : _full(full) {}; |
1296 | }; |
1297 | |
1298 | void GenCollectedHeap::gc_prologue(bool full) { |
1299 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
1300 | |
1301 | // Fill TLAB's and such |
1302 | ensure_parsability(true); // retire TLABs |
1303 | |
1304 | // Walk generations |
1305 | GenGCPrologueClosure blk(full); |
1306 | generation_iterate(&blk, false); // not old-to-young. |
1307 | }; |
1308 | |
1309 | class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { |
1310 | private: |
1311 | bool _full; |
1312 | public: |
1313 | void do_generation(Generation* gen) { |
1314 | gen->gc_epilogue(_full); |
1315 | } |
1316 | GenGCEpilogueClosure(bool full) : _full(full) {}; |
1317 | }; |
1318 | |
1319 | void GenCollectedHeap::gc_epilogue(bool full) { |
1320 | #if COMPILER2_OR_JVMCI |
1321 | assert(DerivedPointerTable::is_empty(), "derived pointer present"); |
1322 | size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); |
1323 | guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); |
1324 | #endif // COMPILER2_OR_JVMCI |
1325 | |
1326 | resize_all_tlabs(); |
1327 | |
1328 | GenGCEpilogueClosure blk(full); |
1329 | generation_iterate(&blk, false); // not old-to-young. |
1330 | |
1331 | if (!CleanChunkPoolAsync) { |
1332 | Chunk::clean_chunk_pool(); |
1333 | } |
1334 | |
1335 | MetaspaceCounters::update_performance_counters(); |
1336 | CompressedClassSpaceCounters::update_performance_counters(); |
1337 | }; |
1338 | |
1339 | #ifndef PRODUCT |
1340 | class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { |
1341 | private: |
1342 | public: |
1343 | void do_generation(Generation* gen) { |
1344 | gen->record_spaces_top(); |
1345 | } |
1346 | }; |
1347 | |
1348 | void GenCollectedHeap::record_gen_tops_before_GC() { |
1349 | if (ZapUnusedHeapArea) { |
1350 | GenGCSaveTopsBeforeGCClosure blk; |
1351 | generation_iterate(&blk, false); // not old-to-young. |
1352 | } |
1353 | } |
1354 | #endif // not PRODUCT |
1355 | |
1356 | class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { |
1357 | public: |
1358 | void do_generation(Generation* gen) { |
1359 | gen->ensure_parsability(); |
1360 | } |
1361 | }; |
1362 | |
1363 | void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { |
1364 | CollectedHeap::ensure_parsability(retire_tlabs); |
1365 | GenEnsureParsabilityClosure ep_cl; |
1366 | generation_iterate(&ep_cl, false); |
1367 | } |
1368 | |
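// Handle a failed promotion into the old generation by expanding it and
// copying the object there; returns NULL if the expansion cannot satisfy
// the allocation.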
1369 | oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen, |
1370 | oop obj, |
1371 | size_t obj_size) { |
1372 | guarantee(old_gen == _old_gen, "We only get here with an old generation"); |
1373 | assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
1374 | HeapWord* result = NULL; |
1375 | |
1376 | result = old_gen->expand_and_allocate(obj_size, false); |
1377 | |
1378 | if (result != NULL) { |
1379 | Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); |
1380 | } |
1381 | return oop(result); |
1382 | } |
1383 | |
1384 | class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { |
1385 | jlong _time; // in ms |
1386 | jlong _now; // in ms |
1387 | |
1388 | public: |
1389 | GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } |
1390 | |
1391 | jlong time() { return _time; } |
1392 | |
1393 | void do_generation(Generation* gen) { |
1394 | _time = MIN2(_time, gen->time_of_last_gc(_now)); |
1395 | } |
1396 | }; |
1397 | |
1398 | jlong GenCollectedHeap::millis_since_last_gc() { |
1399 | // javaTimeNanos() is guaranteed to be monotonically non-decreasing |
1400 | // provided the underlying platform provides such a time source |
1401 | // (and it is bug free). So we still have to guard against getting |
1402 | // back a time later than 'now'. |
1403 | jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
1404 | GenTimeOfLastGCClosure tolgc_cl(now); |
1405 | // iterate over generations getting the oldest |
1406 | // time that a generation was collected |
1407 | generation_iterate(&tolgc_cl, false); |
1408 | |
1409 | jlong retVal = now - tolgc_cl.time(); |
1410 | if (retVal < 0) { |
1411 | log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT |
1412 | ". returning zero instead." , retVal); |
1413 | return 0; |
1414 | } |
1415 | return retVal; |
1416 | } |
1417 | |