/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/gcId.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Sweep Thread ========

ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
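// _CMS_flag holds the CMS_* token bits (declared in the corresponding header)
// recording which of the CMS thread and the VM thread currently has, or
// wants, the CMS token; see synchronize()/desynchronize() below.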
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;

volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;

ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
  : ConcurrentGCThread() {
  assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
  assert(_cmst == NULL, "CMS thread already created");
  _cmst = this;
  assert(_collector == NULL, "Collector already set");
  _collector = collector;

  set_name("CMS Main Thread");

  // An old comment here said: "Priority should be just less
  // than that of VMThread". Since the VMThread runs at
  // NearMaxPriority, the old comment was inaccurate, but
  // changing the default priority to NearMaxPriority-1
  // could change current behavior, so the default of
  // NearMaxPriority stays in place.
  //
  // Note that there's a possibility of the VMThread
  // starving if UseCriticalCMSThreadPriority is on.
  // That won't happen on Solaris for various reasons,
  // but may well happen on non-Solaris platforms.
  create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
}

void ConcurrentMarkSweepThread::run_service() {
  assert(this == cmst(), "just checking");

  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
    log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
  }

  while (!should_terminate()) {
    sleepBeforeNextCycle();
    if (should_terminate()) break;
    GCIdMark gc_id_mark;
    GCCause::Cause cause = _collector->_full_gc_requested ?
      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
    _collector->collect_in_background(cause);
  }

  // Check that the state of any protocol for synchronization
  // between background (CMS) and foreground collector is "clean"
  // (i.e. will not potentially block the foreground collector,
  // requiring action by us).
  verify_ok_to_terminate();
}

#ifndef PRODUCT
void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
  assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
           cms_thread_wants_cms_token()),
         "Must renounce all worldly possessions and desires for nirvana");
  _collector->verify_ok_to_terminate();
}
#endif

// create and start a new ConcurrentMarkSweep Thread for given CMS generation
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
  guarantee(_cmst == NULL, "start() called twice!");
  ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
  assert(_cmst == th, "Where did the just-created CMS thread go?");
  return th;
}

void ConcurrentMarkSweepThread::stop_service() {
  // Now post a notify on CGC_lock so as to nudge
  // CMS thread(s) that might be slumbering in
  // sleepBeforeNextCycle.
  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
  CGC_lock->notify_all();
}

void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
  assert(tc != NULL, "Null ThreadClosure");
  if (cmst() != NULL && !cmst()->has_terminated()) {
    tc->do_thread(cmst());
  }
  assert(Universe::is_fully_initialized(),
         "Called too early, make sure heap is fully initialized");
  if (_collector != NULL) {
    AbstractWorkGang* gang = _collector->conc_workers();
    if (gang != NULL) {
      gang->threads_do(tc);
    }
  }
}

void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
  if (cmst() != NULL && !cmst()->has_terminated()) {
    cmst()->print_on(st);
    st->cr();
  }
  if (_collector != NULL) {
    AbstractWorkGang* gang = _collector->conc_workers();
    if (gang != NULL) {
      gang->print_worker_threads_on(st);
    }
  }
}

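// Token-passing protocol between the VM thread and the CMS thread, built on
// CGC_lock and the CMS_* flag bits: a thread that wants the CMS token sets
// its "wants" bit and waits on CGC_lock until it can claim the token, then
// clears its "wants" bit and sets its "has" bit. Note the asymmetry below:
// the VM thread waits only while the CMS thread actually holds the token,
// whereas the CMS thread also defers to a VM thread that merely wants it,
// giving the VM thread priority.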
void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
  assert(UseConcMarkSweepGC, "just checking");

  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (!is_cms_thread) {
    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
    CMSSynchronousYieldRequest yr;
    while (CMS_flag_is_set(CMS_cms_has_token)) {
      // indicate that we want to get the token
      set_CMS_flag(CMS_vm_wants_token);
      CGC_lock->wait_without_safepoint_check();
    }
    // claim the token and proceed
    clear_CMS_flag(CMS_vm_wants_token);
    set_CMS_flag(CMS_vm_has_token);
  } else {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "Not a CMS thread");
    // The following barrier assumes there's only one CMS thread.
    // This will need to be modified if there are more CMS threads than one.
    while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
      set_CMS_flag(CMS_cms_wants_token);
      CGC_lock->wait_without_safepoint_check();
    }
    // claim the token
    clear_CMS_flag(CMS_cms_wants_token);
    set_CMS_flag(CMS_cms_has_token);
  }
}

void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
  assert(UseConcMarkSweepGC, "just checking");

  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (!is_cms_thread) {
    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
    assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
    clear_CMS_flag(CMS_vm_has_token);
    if (CMS_flag_is_set(CMS_cms_wants_token)) {
      // wake-up a waiting CMS thread
      CGC_lock->notify();
    }
    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
           "Should have been cleared");
  } else {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "Not a CMS thread");
    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
    clear_CMS_flag(CMS_cms_has_token);
    if (CMS_flag_is_set(CMS_vm_wants_token)) {
      // wake-up a waiting VM thread
      CGC_lock->notify();
    }
    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
           "Should have been cleared");
  }
}
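
// Illustrative sketch of how the protocol above is typically used: a
// stack-allocated scope object (see CMSTokenSync in the header) acquires
// the token in its constructor and releases it in its destructor, e.g.
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... work that requires the CMS token ...
//   }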

// Wait until any cms_lock event
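// The timeout follows the usual Monitor convention: a t_millis value of 0
// means wait indefinitely. The wait may also return early when CGC_lock is
// notified, e.g. by stop_service() above or by desynchronize() releasing
// the token while CMS_cms_wants_token is set.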
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (should_terminate() || _collector->_full_gc_requested) {
    return;
  }
  set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
  CGC_lock->wait_without_safepoint_check(t_millis);
  clear_CMS_flag(CMS_cms_wants_token);
  assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
         "Should not be set");
}

// Wait until the next synchronous GC, a concurrent full gc request,
// or a timeout, whichever is earlier.
void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
  // t_millis is the wait time in millis; a value of 0 means wait indefinitely for a scavenge
  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");

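  // Strategy: record the heap's total_collections() count up front, then
  // repeatedly wait on CGC_lock for the remaining portion of t_millis.
  // The loop exits when the timeout expires, when termination or a
  // concurrent full gc is requested, or when the collection count changes,
  // indicating that some collection (typically a scavenge) has occurred.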
  CMSHeap* heap = CMSHeap::heap();
  double start_time_secs = os::elapsedTime();
  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));

  // Total collections count before waiting loop
  unsigned int before_count;
  {
    MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
    before_count = heap->total_collections();
  }

  unsigned int loop_count = 0;

  while (!should_terminate()) {
    double now_time = os::elapsedTime();
    long wait_time_millis;

    if (t_millis != 0) {
      // New wait limit
      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
      if (wait_time_millis <= 0) {
        // Wait time is over
        break;
      }
    } else {
      // No wait limit; wait forever if necessary
      wait_time_millis = 0;
    }

    // Wait until the next event or the remaining timeout
    {
      MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);

      if (should_terminate() || _collector->_full_gc_requested) {
        return;
      }
      set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
      CGC_lock->wait_without_safepoint_check(wait_time_millis);
      clear_CMS_flag(CMS_cms_wants_token);
      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
             "Should not be set");
    }

    // Extra wait time check before entering the heap lock to get the collection count
    if (t_millis != 0 && os::elapsedTime() >= end_time_secs) {
      // Wait time is over
      break;
    }

    // Total collections count after the event
    unsigned int after_count;
    {
      MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
      after_count = heap->total_collections();
    }

    if (before_count != after_count) {
      // There was a collection - success
      break;
    }

    // Too many loops warning
    if (++loop_count == 0) {
      log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
    }
  }
}

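// Block between CMS cycles: return only when a collection criterion is met
// (shouldConcurrentCollect() returns true) or termination is requested.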
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
  while (!should_terminate()) {
    if (CMSWaitDuration >= 0) {
      // Wait until the next synchronous GC, a concurrent full gc
      // request or a timeout, whichever is earlier.
      wait_on_cms_lock_for_scavenge(CMSWaitDuration);
    } else {
      // Wait until any cms_lock event, but no longer than CMSCheckInterval,
      // so that shouldConcurrentCollect() is not polled continuously.
      wait_on_cms_lock(CMSCheckInterval);
    }
    // Check if we should start a CMS collection cycle
    if (_collector->shouldConcurrentCollect()) {
      return;
    }
    // .. collection criterion not yet met, let's go back
    // and wait some more
  }
}