/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
#define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP

#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
class PSCardTable;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen* _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  SoftRefPolicy _soft_ref_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  GCMemoryManager* _young_manager;
  GCMemoryManager* _old_manager;

  MemoryPool* _eden_pool;
  MemoryPool* _survivor_pool;
  MemoryPool* _old_pool;

  virtual void initialize_serviceability();

  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() :
    CollectedHeap(),
    _gens(NULL),
    _death_march_count(0),
    _young_manager(NULL),
    _old_manager(NULL),
    _eden_pool(NULL),
    _survivor_pool(NULL),
    _old_pool(NULL) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  virtual Name kind() const {
    return CollectedHeap::Parallel;
  }

  virtual const char* name() const {
    return "Parallel";
  }

  virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }

  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen() { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  CardTableBarrierSet* barrier_set();
  PSCardTable* card_table();

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);
  virtual void flush_nmethod(nmethod* nm);

  void prune_scavengable_nmethods();

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation. "gc_overhead_limit_was_exceeded" will be set to true
  // if the adaptive size policy determines that an excessive amount of time
  // is being spent doing collections and caused a NULL to be returned.
  // If a NULL is not returned, "gc_overhead_limit_was_exceeded" has an
  // undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
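
  // Illustrative usage sketch (not a caller defined in this file): VM-internal
  // allocation code might invoke mem_allocate() roughly as follows; "heap" and
  // the error handling shown here are assumptions for illustration only.
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(size_in_words, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL) {
  //     // Allocation failed even after collection; the caller typically
  //     // reports an OutOfMemoryError, mentioning the GC overhead limit
  //     // when gc_overhead_limit_was_exceeded is true.
  //   }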

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc. The second collects the entire heap; if
  // clear_all_soft_refs is true, it will also clear all soft references.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
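
  // Conceptual sketch of the inline contiguous ("bump-the-pointer") fast path
  // that compiled code can emit against top_addr()/end_addr() when
  // supports_inline_contig_alloc() returns true. This is illustrative
  // pseudocode only, not code generated from this header.
  //
  //   HeapWord* old_top = *top_addr();
  //   HeapWord* new_top = old_top + size_in_words;
  //   if (new_top <= *end_addr() &&
  //       Atomic::cmpxchg(new_top, top_addr(), old_top) == old_top) {
  //     return old_top;   // fast-path eden allocation succeeded
  //   }
  //   // Otherwise fall back to the slow path (e.g. mem_allocate()).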

  void ensure_parsability(bool retire_tlabs);
  void resize_all_tlabs();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(VerifyOption option /* ignored */);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
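
  // Usage sketch (illustrative only): sequential code brackets the parallel
  // processing of strong roots with this scope, e.g.
  //
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... enqueue and execute the strong-root GC tasks ...
  //   }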

  GCMemoryManager* old_gc_manager() const { return _old_manager; }
  GCMemoryManager* young_gc_manager() const { return _young_manager; }
};

// Simple class for storing info about the heap at the start of GC, to be used
// after GC for comparison/printing.
class PreGCValues {
public:
  PreGCValues(ParallelScavengeHeap* heap) :
    _heap_used(heap->used()),
    _young_gen_used(heap->young_gen()->used_in_bytes()),
    _old_gen_used(heap->old_gen()->used_in_bytes()),
    _metadata_used(MetaspaceUtils::used_bytes()) { };

  size_t heap_used() const { return _heap_used; }
  size_t young_gen_used() const { return _young_gen_used; }
  size_t old_gen_used() const { return _old_gen_used; }
  size_t metadata_used() const { return _metadata_used; }

private:
  size_t _heap_used;
  size_t _young_gen_used;
  size_t _old_gen_used;
  size_t _metadata_used;
};
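
// Usage sketch (illustrative only, assuming unified logging is available in
// the caller): capture the values at the start of a collection and compare
// them with the live heap when logging afterwards, e.g.
//
//   PreGCValues pre_gc_values(ParallelScavengeHeap::heap());
//   // ... perform the collection ...
//   log_info(gc, heap)("used: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
//                      pre_gc_values.heap_used() / K,
//                      ParallelScavengeHeap::heap()->used() / K);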

// Class that can be used to print information about the
// adaptive size policy at intervals specified by
// AdaptiveSizePolicyOutputInterval. Only print information
// if an adaptive size policy is in use.
class AdaptiveSizePolicyOutput : AllStatic {
  static bool enabled() {
    return UseParallelGC &&
           UseAdaptiveSizePolicy &&
           log_is_enabled(Debug, gc, ergo);
  }
 public:
  static void print() {
    if (enabled()) {
      ParallelScavengeHeap::heap()->size_policy()->print();
    }
  }

  static void print(AdaptiveSizePolicy* size_policy, uint count) {
    bool do_print =
      enabled() &&
      (AdaptiveSizePolicyOutputInterval > 0) &&
      (count % AdaptiveSizePolicyOutputInterval) == 0;

    if (do_print) {
      size_policy->print();
    }
  }
};
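
// Usage sketch (illustrative only): a collector can emit the periodic output
// after each collection, keyed to the total collection count, e.g.
//
//   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
//   AdaptiveSizePolicyOutput::print(heap->size_policy(), heap->total_collections());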

#endif // SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP