/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsCardTable.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genMemoryPools.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/stack.inline.hpp"

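// Adapts the CMS old generation's CompactibleFreeListSpace to the
// CollectedMemoryPool (java.lang.management) interface. Committed size is
// the space's current capacity; used size is whatever the free lists
// report as allocated.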
class CompactibleFreeListSpacePool : public CollectedMemoryPool {
private:
  CompactibleFreeListSpace* _space;
public:
  CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
                               const char* name,
                               size_t max_size,
                               bool support_usage_threshold) :
    CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
    _space(space) {
  }

  MemoryUsage get_memory_usage() {
    size_t max_heap_size = (available_for_allocation() ? max_size() : 0);
    size_t used      = used_in_bytes();
    size_t committed = _space->capacity();

    return MemoryUsage(initial_size(), used, committed, max_heap_size);
  }

  size_t used_in_bytes() {
    return _space->used();
  }
};

CMSHeap::CMSHeap() :
    GenCollectedHeap(Generation::ParNew,
                     Generation::ConcurrentMarkSweep,
                     "ParNew:CMS"),
    _workers(NULL),
    _eden_pool(NULL),
    _survivor_pool(NULL),
    _old_pool(NULL) {
}

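// Complete heap setup after GenCollectedHeap::initialize() has created the
// two generations: build the parallel GC worker gang (sized by
// ParallelGCThreads) and the CMSCollector. Any failure is reported back as
// a JNI error code so VM startup can abort cleanly.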
jint CMSHeap::initialize() {
  jint status = GenCollectedHeap::initialize();
  if (status != JNI_OK) return status;

  _workers = new WorkGang("GC Thread", ParallelGCThreads,
                          /* are_GC_task_threads */true,
                          /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    return JNI_ENOMEM;
  }
  _workers->initialize_workers();

  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (!create_cms_collector()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

CardTableRS* CMSHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CMSCardTable(reserved_region);
}

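// Build the memory pools and GC memory managers exposed through the
// management API. The minor-GC manager covers eden and survivor only,
// while the major-GC manager is handed all three pools, since a CMS (or
// full) collection can affect the entire heap.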
void CMSHeap::initialize_serviceability() {
  _young_manager = new GCMemoryManager("ParNew", "end of minor GC");
  _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");

  ParNewGeneration* young = young_gen();
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Par Eden Space",
                                       young->max_eden_size(),
                                       false);

  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Par Survivor Space",
                                                   young->max_survivor_size(),
                                                   false);

  ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
  _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
                                               "CMS Old Gen",
                                               old->reserved().byte_size(),
                                               true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

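// Accessor for the single CMSHeap instance, checked in debug builds.
// Illustrative call site (the pattern, not a line from this file):
//   CMSHeap* heap = CMSHeap::heap();
//   heap->collect(GCCause::_java_lang_system_gc);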
CMSHeap* CMSHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
  assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
  return static_cast<CMSHeap*>(heap);
}

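// GC thread iteration and printing cover both the parallel worker gang and
// the concurrent CMS thread(s).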
void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
  assert(workers() != NULL, "should have workers here");
  workers()->threads_do(tc);
  ConcurrentMarkSweepThread::threads_do(tc);
}

void CMSHeap::print_gc_threads_on(outputStream* st) const {
  assert(workers() != NULL, "should have workers here");
  workers()->print_worker_threads_on(st);
  ConcurrentMarkSweepThread::print_all_on(st);
}

void CMSHeap::print_on_error(outputStream* st) const {
  GenCollectedHeap::print_on_error(st);
  st->cr();
  CMSCollector::print_on_error(st);
}

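// Create the CMSCollector that ties together the CMS old generation and the
// remembered set. If the collector cannot complete initialization it is
// deleted and VM startup is shut down with an error message.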
bool CMSHeap::create_cms_collector() {
  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  CMSCollector* collector =
    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

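// Full collections requested explicitly (System.gc(), jcmd GC.run, GC
// locker) may be turned into a "mostly concurrent" cycle, depending on
// ExplicitGCInvokesConcurrent / GCLockerInvokesConcurrent; everything else
// is handled by the generational stop-the-world path.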
void CMSHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
    // Mostly concurrent full collection.
    collect_mostly_concurrent(cause);
  } else {
    GenCollectedHeap::collect(cause);
  }
}

bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
    case GCCause::_java_lang_system_gc:
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
    default:                            return false;
  }
}

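// The collection counts are sampled under the Heap_lock and passed to the
// VM operation, which uses them to notice whether another collection has
// already happened in the window where the lock is released.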
void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock.
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}

void CMSHeap::stop() {
  ConcurrentMarkSweepThread::cmst()->stop();
}

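// Called by the VM thread around safepoints: park the concurrent CMS thread
// before the world is stopped and release it again afterwards.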
void CMSHeap::safepoint_synchronize_begin() {
  ConcurrentMarkSweepThread::synchronize(false);
}

void CMSHeap::safepoint_synchronize_end() {
  ConcurrentMarkSweepThread::desynchronize(false);
}

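// Root processing used by the CMS collector: scan the strong roots (and,
// unless only_strong_roots, the weak CLDs as well), then optionally treat
// the entire young generation as a source of roots into the old generation.
// The task claim ensures only one worker scans the young gen.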
void CMSHeap::cms_process_roots(StrongRootsScope* scope,
                                bool young_gen_as_roots,
                                ScanningOption so,
                                bool only_strong_roots,
                                OopsInGenClosure* root_closure,
                                CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);

  if (young_gen_as_roots &&
      _process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
    root_closure->set_generation(young_gen());
    young_gen()->oop_iterate(root_closure);
    root_closure->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed(scope->n_threads());
}

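// The always_do_update_barrier flag (an unconditional post-write barrier
// used with concurrent modes) is cleared for the duration of a
// stop-the-world pause and set again in the epilogue.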
void CMSHeap::gc_prologue(bool full) {
  always_do_update_barrier = false;
  GenCollectedHeap::gc_prologue(full);
}

void CMSHeap::gc_epilogue(bool full) {
  GenCollectedHeap::gc_epilogue(full);
  always_do_update_barrier = true;
}

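// Export the managers and pools created in initialize_serviceability() to
// the memory service (MemoryManagerMXBean / MemoryPoolMXBean).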
GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}