/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/softRefGenPolicy.hpp"

class AdaptiveSizePolicy;
class CardTableRS;
class GCPolicyCounters;
class GenerationSpec;
class StrongRootsScope;
class SubTasksDone;
class WorkGang;

// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public CollectedHeap {
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  friend class VM_PopulateDumpSharedSpace;

  enum GenerationType {
    YoungGen,
    OldGen
  };

protected:
  Generation* _young_gen;
  Generation* _old_gen;

private:
  GenerationSpec* _young_gen_spec;
  GenerationSpec* _old_gen_spec;

  // The singleton CardTable Remembered Set.
  CardTableRS* _rem_set;

  SoftRefGenPolicy _soft_ref_gen_policy;

  // The sizing of the heap is controlled by a sizing policy.
  AdaptiveSizePolicy* _size_policy;

  GCPolicyCounters* _gc_policy_counters;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Collects the given generation.
  void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                          bool run_verification, bool clear_soft_refs,
                          bool restore_marks_for_biased_locking);

  // Reserve aligned space for the heap as needed by the contained generations.
  char* allocate(size_t alignment, ReservedSpace* heap_rs);

  // Initialize ("weak") refs processing support
  void ref_processing_init();

protected:

  // The set of potentially parallel tasks in root scanning.
  enum GCH_strong_roots_tasks {
    GCH_PS_Universe_oops_do,
    GCH_PS_JNIHandles_oops_do,
    GCH_PS_ObjectSynchronizer_oops_do,
    GCH_PS_FlatProfiler_oops_do,
    GCH_PS_Management_oops_do,
    GCH_PS_SystemDictionary_oops_do,
    GCH_PS_ClassLoaderDataGraph_oops_do,
    GCH_PS_jvmti_oops_do,
    GCH_PS_CodeCache_oops_do,
    AOT_ONLY(GCH_PS_aot_oops_do COMMA)
    JVMCI_ONLY(GCH_PS_jvmci_oops_do COMMA)
    GCH_PS_younger_gens,
    // Leave this one last.
    GCH_PS_NumElements
  };

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;

  GCMemoryManager* _young_manager;
  GCMemoryManager* _old_manager;

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool is_tlab,
                               bool first_only);

  // Helper function for the two callbacks below.
  // Considers collection of the generations up to and including max_generation.
  void do_collection(bool           full,
                     bool           clear_all_soft_refs,
                     size_t         size,
                     bool           is_tlab,
                     GenerationType max_generation);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the generations up to and including max_generation.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

  GenCollectedHeap(Generation::Name young,
                   Generation::Name old,
                   const char* policy_counters_name);

public:

  // Returns JNI_OK on success
  virtual jint initialize();
  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);

  void initialize_size_policy(size_t init_eden_size,
                              size_t init_promo_size,
                              size_t init_survivor_size);

  // Performs operations required after initialization has been done.
  void post_initialize();

  Generation* young_gen() const { return _young_gen; }
  Generation* old_gen()   const { return _old_gen; }

  bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
  bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }

  GenerationSpec* young_gen_spec() const;
  GenerationSpec* old_gen_spec() const;

  virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_gen_policy; }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return _size_policy;
  }

  // Performance Counter support
  GCPolicyCounters* counters() { return _gc_policy_counters; }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for both generations.
  void save_used_regions();

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  virtual void collect(GCCause::Cause cause);

  // The same as above, but assumes that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of generations up to and including max_generation.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, GenerationType max_generation);
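
  // Illustrative sketch (not part of this interface): a test wanting a full
  // collection of both generations might, assuming it runs where a
  // GenCollectedHeap is the active heap and _java_lang_system_gc is an
  // acceptable cause, do something like:
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   gch->collect(GCCause::_java_lang_system_gc, GenCollectedHeap::OldGen);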

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in() and is_in_young() may be expensive to compute
  // in general, so, to prevent their inadvertent use in product jvm's, we
  // restrict their use to assertion checking or verification only.
  bool is_in(const void* p) const;

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  bool is_in_partial_collection(const void* p);
#endif

  // Optimized nmethod scanning support routines
  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);
  virtual void flush_nmethod(nmethod* nm);

  void prune_scavengable_nmethods();

  // Iteration functions.
  void oop_iterate(OopIterateClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
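
  // Illustrative sketch (not part of this interface): given an arbitrary
  // address known to be in the heap, a client could locate and test the
  // enclosing block roughly as follows (names are for illustration only):
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   HeapWord* start = gch->block_start(addr);  // start of the enclosing block
  //   if (start != NULL && gch->block_is_obj(start)) {
  //     // the block holds a Java object beginning at "start"
  //   }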

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion say.) Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();
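
  // Illustrative sketch (not part of this interface): the returned list is
  // walked via the ScratchBlock fields declared in generation.hpp; assuming
  // those fields (next, num_words), a requestor could consume the blocks
  // largest-first, e.g.
  //
  //   for (ScratchBlock* b = gather_scratch(gen, max_alloc_words);
  //        b != NULL; b = b->next) {
  //     // b->num_words gives the size of this block; the list is sorted by
  //     // decreasing size. release_scratch() later returns the space to
  //     // the generations that contributed it.
  //   }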

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Returns the time in ms since a collector last ran in any generation,
  // i.e. the longest elapsed time since any generation was last collected.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all generations to "now".
  void update_time_of_last_gc(jlong now) {
    _young_gen->update_time_of_last_gc(now);
    _old_gen->update_time_of_last_gc(now);
  }

  // Update the gc statistics for each generation.
  void update_gc_stats(Generation* current_generation, bool full) {
    _old_gen->update_gc_stats(current_generation, full);
  }

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap;
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
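
  // Illustrative sketch (not part of this interface): a client typically
  // defines a small GenClosure subclass and hands it to generation_iterate();
  // the closure below is hypothetical and only shows the shape of a caller.
  //
  //   class PrintGenNamesClosure : public GenCollectedHeap::GenClosure {
  //   public:
  //     void do_generation(Generation* gen) { tty->print_cr("%s", gen->name()); }
  //   };
  //
  //   PrintGenNamesClosure cl;
  //   GenCollectedHeap::heap()->generation_iterate(&cl, /* old_to_young */ false);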

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // This function returns the CardTableRS object that allows us to scan
  // generations in a fully generational heap.
  CardTableRS* rem_set() { return _rem_set; }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();

  // The ScanningOption determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  enum ScanningOption {
    SO_None              = 0x0,
    SO_AllCodeCache      = 0x8,
    SO_ScavengeCodeCache = 0x10
  };

 protected:
  void process_roots(StrongRootsScope* scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobToOopClosure* code_roots);

  // Accessor for memory state verification support
  NOT_PRODUCT(
    virtual size_t skip_header_HeapWords() { return 0; }
  )

  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

 public:
  void young_process_roots(StrongRootsScope* scope,
                           OopsInGenClosure* root_closure,
                           OopsInGenClosure* old_gen_closure,
                           CLDClosure* cld_closure);

  void full_process_roots(StrongRootsScope* scope,
                          bool is_adjust_phase,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* root_closure,
                          CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Returns "true" iff no allocations have occurred since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks();

  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // The first disjunct remembers if an incremental collection failed, even
    // when we thought (second disjunct) that it would not.
    return incremental_collection_failed() ||
           (consult_young && !_young_gen->collection_attempt_is_safe());
  }

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into the old generation failed. Try to make room by
  // having the old generation expand and allocate space for obj; return the
  // new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);


private:
  // Return true if an allocation should be attempted in the older generation
  // if it fails in the younger generation. Return false otherwise.
  bool should_try_older_generation_allocation(size_t word_size) const;

  // Try to allocate space by expanding the heap.
  HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);

  HeapWord* mem_allocate_work(size_t size,
                              bool is_tlab,
                              bool* gc_overhead_limit_was_exceeded);

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

#if INCLUDE_SERIALGC
  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();
#endif

  // Perform a full collection of the generations up to and including max_generation.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, GenerationType max_generation);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Return true if we need to perform a full collection.
  bool should_do_full_collection(size_t size, bool full,
                                 bool is_tlab, GenerationType max_gen) const;
};

#endif // SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP