/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

//
// ThreadRootsMarkingTask
//

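// Marks everything reachable from a single thread: the oops in its stack
// frames and the code blobs (nmethods) it is executing, then drains this
// worker's marking stacks.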
void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ResourceMark rm;

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PCMarkAndPushClosure mark_and_push_closure(cm);
  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);

  _thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);

  // Drain the marking stacks filled by the closures above.
  cm->follow_marking_stacks();
}

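//
// MarkFromRootsTask
//

// Marks everything reachable from one category of strong roots, selected
// by _root_type, then drains this worker's marking stacks.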
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PCMarkAndPushClosure mark_and_push_closure(cm);

  switch (_root_type) {
    case universe:
      Universe::oops_do(&mark_and_push_closure);
      break;

    case jni_handles:
      JNIHandles::oops_do(&mark_and_push_closure);
      break;

    case threads:
    {
      ResourceMark rm;
      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
      Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
    }
    break;

    case object_synchronizer:
      ObjectSynchronizer::oops_do(&mark_and_push_closure);
      break;

    case management:
      Management::oops_do(&mark_and_push_closure);
      break;

    case jvmti:
      JvmtiExport::oops_do(&mark_and_push_closure);
      break;

    case system_dictionary:
      SystemDictionary::oops_do(&mark_and_push_closure);
      break;

    case class_loader_data: {
      CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_strong);
      ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
    }
    break;

    case code_cache:
      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
      //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
      AOTLoader::oops_do(&mark_and_push_closure);
      break;

#if INCLUDE_JVMCI
    case jvmci:
      JVMCI::oops_do(&mark_and_push_closure);
      break;
#endif

    default:
      fatal("Unknown root type");
  }

  // Drain the marking stacks filled by the root closures above.
  cm->follow_marking_stacks();
}

//
// RefProcTaskProxy
//

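// Adapts a reference processing task (ProcessTask) to the GCTask
// interface so it can run on a GC worker thread, supplying that worker's
// compaction manager and marking closures.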
void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PCMarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                mark_and_push_closure, follow_stack_closure);
}

//
// RefProcTaskExecutor
//

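// Distributes a reference processing task across the active GC workers,
// one RefProcTaskProxy per worker, adding marking steal tasks when the
// task marks oops alive.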
void RefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
{
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  assert(active_gc_threads == ergo_workers,
         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
         ergo_workers, active_gc_threads);
  OopTaskQueueSet* qset = ParCompactionManager::stack_array();
  TaskTerminator terminator(active_gc_threads, qset);

  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < active_gc_threads; i++) {
    q->enqueue(new RefProcTaskProxy(task, i));
  }
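  // Reference processing that marks oops alive pushes new work onto the
  // marking stacks, so add steal tasks that let idle workers take over
  // work queued by busier ones.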
  if (task.marks_oops_alive() && (active_gc_threads > 1)) {
    for (uint j = 0; j < active_gc_threads; j++) {
      q->enqueue(new StealMarkingTask(terminator.terminator()));
    }
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

//
// StealMarkingTask
//

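// Work-stealing companion to the marking tasks above: repeatedly steals
// object array chunks and individual objects from other workers' marking
// stacks, following each one, until all workers agree to terminate.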
StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}

void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  oop obj = NULL;
  ObjArrayTask task;
  do {
    while (ParCompactionManager::steal_objarray(which, task)) {
      cm->follow_array((objArrayOop)task.obj(), task.index());
      cm->follow_marking_stacks();
    }
    while (ParCompactionManager::steal(which, obj)) {
      cm->follow_contents(obj);
      cm->follow_marking_stacks();
    }
  } while (!terminator()->offer_termination());
}

//
// CompactionWithStealingTask
//

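// Fills and updates destination regions during the compaction phase.
// Each worker first drains its own preloaded region stack, then steals
// region indices from other workers until all agree to terminate.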
CompactionWithStealingTask::CompactionWithStealingTask(ParallelTaskTerminator* t):
  _terminator(t) {}

void CompactionWithStealingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  // Drain the stacks that have been preloaded with regions
  // that are ready to fill.

  cm->drain_region_stacks();

  guarantee(cm->region_stack()->is_empty(), "Not empty");

  size_t region_index = 0;

  while (true) {
    if (ParCompactionManager::steal(which, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
}

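//
// UpdateDensePrefixTask
//

// Updates interior pointers in the given range of regions within the
// dense prefix of one space; objects in the dense prefix are not moved,
// so only their references to moved objects need updating.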
UpdateDensePrefixTask::UpdateDensePrefixTask(
                                   PSParallelCompact::SpaceId space_id,
                                   size_t region_index_start,
                                   size_t region_index_end) :
  _space_id(space_id), _region_index_start(region_index_start),
  _region_index_end(region_index_end) {}

void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                         _space_id,
                                                         _region_index_start,
                                                         _region_index_end);
}