/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

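// Records a before/after-GC heap snapshot in the event-log ring buffer so
// that recent heap states show up in the VM's internal event log (e.g. in
// the "Events" section of hs_err crash reports).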
void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

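// Capacity not currently in use. Heap_lock is taken so that capacity() and
// used() are sampled consistently with respect to each other.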
size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors. These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

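// Conservative sanity check for oops: a valid oop must be properly aligned,
// must lie within the reserved heap, and must have a klass pointer that does
// not itself point into the heap (Klass structures live in metaspace).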
bool CollectedHeap::is_oop(oop object) const {
  if (!check_obj_alignment(object)) {
    return false;
  }

  if (!is_in_reserved(object)) {
    return false;
  }

  if (is_in_reserved(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


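// Note that _filler_array_max_size is a static field shared by all heaps; it
// is derived from the largest int[] the VM can allocate, since filler regions
// larger than that must be split into multiple objects (see fill_with_objects).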
CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}


// This interface assumes that it's being called by the
// VM thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the VM thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
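
// A typical caller is a VM operation's doit() running in the VM thread, with
// Heap_lock acquired in its prologue. A minimal sketch, where VM_MyInspection
// is a hypothetical VM_GC_Operation subclass (not part of this file):
//
//   void VM_MyInspection::doit() {
//     Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
//   }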

// Slow path for metadata allocation failure: retry the allocation, expand
// the metaspace when the GCLocker blocks collection, and otherwise schedule
// VM_CollectForMetadataAllocation operations until the allocation succeeds
// or an unrecoverable state (allocating inside a JNI critical section with
// GC locked out) is detected.
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC could still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}


#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
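  // For example, on a 64-bit VM (HeapWordSize == 8, max_jint == 2147483647),
  // dividing first gives sizeof(jint) * (2147483647 / 8) = 4 * 268435455 =
  // 1073741820, versus (4 * 2147483647) / 8 = 1073741823 if we could multiply
  // first -- only 3 words smaller.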
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t)HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required because a filler array is limited to
  // filler_array_max_size words. Fill most of the range with maximum-size
  // filler arrays; the remainder is filled with a single object.
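  //
  // For example, if words == max + 1 (and min > 1), carving off a full
  // max-sized array would leave a 1-word tail that is too small to fill;
  // the loop below instead takes max - min words, leaving min + 1 words
  // for the final fill_with_object_impl() call.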
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

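// By default a "dummy" object, used for example to pad out the unused tail
// of a TLAB, is just an ordinary filler object; collectors may override this.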
void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

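// Space to reserve at the end of a TLAB so that it can always be retired by
// covering the leftover words with a dummy object. No extra reserve is needed
// when the minimum dummy object fits in MinObjAlignment words, since any
// leftover space is already a multiple of the object alignment.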
size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

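// Make the heap walkable by external parsers: retire (or at least fill) every
// Java thread's TLAB so the heap is a contiguous sequence of well-formed objects.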
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

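// Optionally dump the heap (-XX:+HeapDumpBeforeFullGC / -XX:+HeapDumpAfterFullGC)
// and print a class histogram (gc+classhisto=trace logging) around a full collection.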
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif // #ifndef PRODUCT

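// Object pinning support. The defaults reject pinning; a collector that
// returns true from supports_object_pinning() must also override pin_object()
// and unpin_object().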
bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}

size_t CollectedHeap::obj_size(oop obj) const {
  return obj->size();
}

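// Hash an oop by its address, discarding the low bits that are always zero
// due to object alignment.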
uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}