/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/plab.hpp"

#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "memory/metaspace.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_work(p); }
#endif

class ShenandoahPretouchHeapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      os::pretouch_memory(r->bottom(), r->end(), _page_size);
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
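      // Illustrative example (numbers are assumptions, not defaults): with 32 MB regions
      // and a 64-to-1 heap-to-bitmap ratio, region 10 maps to bitmap bytes [5 MB, 5.5 MB).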
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);

      os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  initialize_heuristics();

  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size = MinHeapSize;
  size_t max_byte_size = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_OBJTASK
  // The optimized ObjArrayChunkedTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ObjArrayChunkedTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ObjArrayChunkedTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);
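  // Illustrative example (numbers are assumptions, not defaults): with 32 MB regions and
  // a 64-to-1 heap-to-bitmap ratio, each region needs 512 KB of bitmap. With 4 KB pages,
  // a slice is a single region (512 KB of bitmap); with 2 MB large pages, a slice covers
  // 4 regions, so that every slice stays page-granular.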

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);
  _collection_set = new ShenandoahCollectionSet(this, sh_rs.base(), sh_rs.size());

  {
    ShenandoahHeapLocker locker(lock());

    size_t size_words = ShenandoahHeapRegion::region_size_words();

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i;
      bool is_committed = i < num_committed_regions;
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed);

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (ShenandoahAlwaysPreTouch) {
    assert(!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    size_t pretouch_heap_page_size = heap_page_size;
    size_t pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      pretouch_heap_page_size = (size_t)os::vm_page_size();
      pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching continuous spaces (heap and bitmap) separately.

    log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
                       _num_regions, pretouch_bitmap_page_size);
    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
                       _num_regions, pretouch_heap_page_size);
    ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
  }

  // The call below uses stuff (the SATB* things) that is in G1, but probably
  // belongs in a shared location.
  ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
                                                         SATB_Q_CBL_mon,
                                                         20 /* G1SATBProcessCompletedThreshold */,
                                                         60 /* G1SATBBufferEnqueueingThresholdPercent */);

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings();
  ShenandoahStringDedup::initialize();
  ShenandoahCodeRoots::initialize();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _traversal_gc = heuristics()->can_do_traversal_gc() ?
                  new ShenandoahTraversalGC(this, _num_regions) :
                  NULL;

  _control_thread = new ShenandoahControlThread();

  log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
                     byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size),
                     byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size),
                     byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
  );

  log_info(gc, init)("Safepointing mechanism: %s",
                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));

  return JNI_OK;
}

void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }
    log_info(gc, init)("Shenandoah heuristics: %s",
                       _heuristics->name());
  } else {
    ShouldNotReachHere();
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _heap_region_special(false),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _heuristics(NULL),
  _free_set(NULL),
  _scm(new ShenandoahConcurrentMark()),
  _traversal_gc(NULL),
  _full_gc(new ShenandoahMarkCompact()),
  _pacer(NULL),
  _verifier(NULL),
  _alloc_tracker(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(NULL),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _collection_set(NULL)
{
  log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
  log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");

  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                                    /* are_GC_task_threads */ true,
                                    /* are_ConcurrentGC_threads */ true);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ShenandoahParallelSafepointThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ShenandoahParallelSafepointThreads,
                                                /* are_GC_task_threads */ false,
                                                /* are_ConcurrentGC_threads */ false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               max_capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (has_forwarded_objects()) st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress()) st->print("marking, ");
  if (is_evacuation_in_progress()) st->print("evacuating, ");
  if (is_update_refs_in_progress()) st->print("updating refs, ");
  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
  if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
  if (is_full_gc_in_progress()) st->print("full gc, ");
  if (is_full_gc_move_in_progress()) st->print("full gc move, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != NULL, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because we cannot determine
  // their max_size yet. From now on, let the WorkGang initialize GCLABs when new
  // workers are created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before) {
  assert (ShenandoahUncommit, "should be enabled");

  // Application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that applications
  // could enjoy the nearby committed regions. GC allocations are much less frequent,
  // and therefore can accept the committing costs.
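  //
  // Illustrative example (numbers are assumptions): if min_capacity() is 256 MB and
  // regions are 32 MB, the loop below stops uncommitting as soon as committed() would
  // drop under 288 MB, so the heap never shrinks below its minimum capacity.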

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        // Do not uncommit below minimal capacity
        if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());
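  // Illustrative example (sizes are assumptions, not defaults): a thread whose last GCLAB
  // was 64 KB asks for 128 KB next, clamped into [PLAB::min_size(), PLAB::max_size()].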

  // Record the new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  ShenandoahAllocTrace trace_alloc(req.size(), req.type());

  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed, block until the control thread reacted, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

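    // In other words (informal summary): retry as long as the last GC made progress;
    // once progress stops, keep retrying until the total number of attempts exceeds
    // ShenandoahFullGCThreshold, and only then give up and return NULL.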
    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req.size());
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req.size());
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual*HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}
917
918class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
919private:
920 ShenandoahHeap* const _heap;
921 Thread* const _thread;
922public:
923 ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
924 _heap(heap), _thread(Thread::current()) {}
925
926 void do_object(oop p) {
927 shenandoah_assert_marked(NULL, p);
928 if (!p->is_forwarded()) {
929 _heap->evacuate_object(p, _thread);
930 }
931 }
932};
933
934class ShenandoahEvacuationTask : public AbstractGangTask {
935private:
936 ShenandoahHeap* const _sh;
937 ShenandoahCollectionSet* const _cs;
938 bool _concurrent;
939public:
940 ShenandoahEvacuationTask(ShenandoahHeap* sh,
941 ShenandoahCollectionSet* cs,
942 bool concurrent) :
943 AbstractGangTask("Parallel Evacuation Task"),
944 _sh(sh),
945 _cs(cs),
946 _concurrent(concurrent)
947 {}
948
949 void work(uint worker_id) {
950 if (_concurrent) {
951 ShenandoahConcurrentWorkerSession worker_session(worker_id);
952 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
953 ShenandoahEvacOOMScope oom_evac_scope;
954 do_work();
955 } else {
956 ShenandoahParallelWorkerSession worker_session(worker_id);
957 ShenandoahEvacOOMScope oom_evac_scope;
958 do_work();
959 }
960 }
961
962private:
963 void do_work() {
964 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
965 ShenandoahHeapRegion* r;
966 while ((r =_cs->claim_next()) != NULL) {
967 assert(r->has_live(), "all-garbage regions are reclaimed early");
968 _sh->marked_object_iterate(r, &cl);
969
970 if (ShenandoahPacing) {
971 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
972 }
973
974 if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
975 break;
976 }
977 }
978 }
979};
980
981void ShenandoahHeap::trash_cset_regions() {
982 ShenandoahHeapLocker locker(lock());
983
984 ShenandoahCollectionSet* set = collection_set();
985 ShenandoahHeapRegion* r;
986 set->clear_current_index();
987 while ((r = set->next()) != NULL) {
988 r->make_trash();
989 }
990 collection_set()->clear();
991}
992
993void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
994 st->print_cr("Heap Regions:");
995 st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
996 st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
997 st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
998 st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
999
1000 for (size_t i = 0; i < num_regions(); i++) {
1001 get_region(i)->print_on(st);
1002 }
1003}
1004
void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;
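  // Illustrative example (region size is an assumption): with 32 MB regions, a 100 MB
  // humongous object needs 4 regions, so "index" points at the last of those 4 regions.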

  assert(!start->has_live(), "liveness must be zero");

  for(size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, the assertion fails when printing a region to the trace log,
    // because it expects every region to belong to a humongous chain that starts with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index --);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}
1026
1027class ShenandoahRetireGCLABClosure : public ThreadClosure {
1028public:
1029 void do_thread(Thread* thread) {
1030 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1031 assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1032 gclab->retire();
1033 }
1034};
1035
1036void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1037 if (UseTLAB) {
1038 CollectedHeap::ensure_parsability(retire_tlabs);
1039 }
1040 ShenandoahRetireGCLABClosure cl;
1041 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1042 cl.do_thread(t);
1043 }
1044 workers()->threads_do(&cl);
1045}
1046
1047void ShenandoahHeap::resize_tlabs() {
1048 CollectedHeap::resize_all_tlabs();
1049}
1050
1051class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1052private:
1053 ShenandoahRootEvacuator* _rp;
1054
1055public:
1056 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1057 AbstractGangTask("Shenandoah evacuate and update roots"),
1058 _rp(rp) {}
1059
1060 void work(uint worker_id) {
1061 ShenandoahParallelWorkerSession worker_session(worker_id);
1062 ShenandoahEvacOOMScope oom_evac_scope;
1063 ShenandoahEvacuateUpdateRootsClosure cl;
1064 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1065 _rp->roots_do(worker_id, &cl);
1066 }
1067};
1068
1069void ShenandoahHeap::evacuate_and_update_roots() {
1070#if COMPILER2_OR_JVMCI
1071 DerivedPointerTable::clear();
1072#endif
1073 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1074
1075 {
1076 ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1077 ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1078 workers()->run_task(&roots_task);
1079 }
1080
1081#if COMPILER2_OR_JVMCI
1082 DerivedPointerTable::update_pointers();
1083#endif
1084}
1085
1086// Returns size in bytes
1087size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1088 if (ShenandoahElasticTLAB) {
1089 // With Elastic TLABs, return the max allowed size, and let the allocation path
1090 // figure out the safe size for current allocation.
1091 return ShenandoahHeapRegion::max_tlab_size_bytes();
1092 } else {
1093 return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1094 }
1095}
1096
1097size_t ShenandoahHeap::max_tlab_size() const {
1098 // Returns size in words
1099 return ShenandoahHeapRegion::max_tlab_size_words();
1100}
1101
1102class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1103public:
1104 void do_thread(Thread* thread) {
1105 PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1106 gclab->retire();
1107 if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1108 ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1109 }
1110 }
1111};
1112
1113void ShenandoahHeap::retire_and_reset_gclabs() {
1114 ShenandoahRetireAndResetGCLABClosure cl;
1115 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1116 cl.do_thread(t);
1117 }
1118 workers()->threads_do(&cl);
1119}
1120
1121void ShenandoahHeap::collect(GCCause::Cause cause) {
1122 control_thread()->request_gc(cause);
1123}
1124
1125void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1126 //assert(false, "Shouldn't need to do full collections");
1127}
1128
1129HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1130 Space* sp = heap_region_containing(addr);
1131 if (sp != NULL) {
1132 return sp->block_start(addr);
1133 }
1134 return NULL;
1135}
1136
1137bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1138 Space* sp = heap_region_containing(addr);
1139 return sp->block_is_obj(addr);
1140}
1141
1142jlong ShenandoahHeap::millis_since_last_gc() {
1143 double v = heuristics()->time_since_last_gc() * 1000;
1144 assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1145 return (jlong)v;
1146}
1147
1148void ShenandoahHeap::prepare_for_verify() {
1149 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1150 make_parsable(false);
1151 }
1152}
1153
1154void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1155 workers()->print_worker_threads_on(st);
1156 if (ShenandoahStringDedup::is_enabled()) {
1157 ShenandoahStringDedup::print_worker_threads_on(st);
1158 }
1159}
1160
1161void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1162 workers()->threads_do(tcl);
1163 if (_safepoint_workers != NULL) {
1164 _safepoint_workers->threads_do(tcl);
1165 }
1166 if (ShenandoahStringDedup::is_enabled()) {
1167 ShenandoahStringDedup::threads_do(tcl);
1168 }
1169}
1170
1171void ShenandoahHeap::print_tracing_info() const {
1172 LogTarget(Info, gc, stats) lt;
1173 if (lt.is_enabled()) {
1174 ResourceMark rm;
1175 LogStream ls(lt);
1176
1177 phase_timings()->print_on(&ls);
1178
1179 ls.cr();
1180 ls.cr();
1181
1182 shenandoah_policy()->print_gc_stats(&ls);
1183
1184 ls.cr();
1185 ls.cr();
1186
1187 if (ShenandoahPacing) {
1188 pacer()->print_on(&ls);
1189 }
1190
1191 ls.cr();
1192 ls.cr();
1193
1194 if (ShenandoahAllocationTrace) {
1195 assert(alloc_tracker() != NULL, "Must be");
1196 alloc_tracker()->print_on(&ls);
1197 } else {
1198 ls.print_cr(" Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1199 }
1200 }
1201}
1202
1203void ShenandoahHeap::verify(VerifyOption vo) {
1204 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1205 if (ShenandoahVerify) {
1206 verifier()->verify_generic(vo);
1207 } else {
1208 // TODO: Consider allocating verification bitmaps on demand,
1209 // and turn this on unconditionally.
1210 }
1211 }
1212}
1213size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1214 return _free_set->capacity();
1215}
1216
1217class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1218private:
1219 MarkBitMap* _bitmap;
1220 Stack<oop,mtGC>* _oop_stack;
1221
1222 template <class T>
1223 void do_oop_work(T* p) {
1224 T o = RawAccess<>::oop_load(p);
1225 if (!CompressedOops::is_null(o)) {
1226 oop obj = CompressedOops::decode_not_null(o);
1227 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1228 assert(oopDesc::is_oop(obj), "must be a valid oop");
1229 if (!_bitmap->is_marked((HeapWord*) obj)) {
1230 _bitmap->mark((HeapWord*) obj);
1231 _oop_stack->push(obj);
1232 }
1233 }
1234 }
1235public:
1236 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1237 _bitmap(bitmap), _oop_stack(oop_stack) {}
1238 void do_oop(oop* p) { do_oop_work(p); }
1239 void do_oop(narrowOop* p) { do_oop_work(p); }
1240};
1241
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::make_tlabs_parsable().
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking, or during/after cleanup (when we just
 * wiped the bitmap in preparation for the next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return;
  }

  // Reset bitmap
  _aux_bit_map.clear();

  Stack<oop,mtGC> oop_stack;

  // First, we process all GC roots. This populates the work stack with initial objects.
  ShenandoahAllRootScanner rp(1, ShenandoahPhaseTimings::_num_phases);
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  rp.roots_do_unchecked(&oops);

  // Work through the oop stack to traverse heap.
  while (! oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");

  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}
1298
1299void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1300 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1301 object_iterate(cl);
1302}
1303
1304void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1305 for (size_t i = 0; i < num_regions(); i++) {
1306 ShenandoahHeapRegion* current = get_region(i);
1307 blk->heap_region_do(current);
1308 }
1309}
1310
1311class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1312private:
1313 ShenandoahHeap* const _heap;
1314 ShenandoahHeapRegionClosure* const _blk;
1315
1316 DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
1317 volatile size_t _index;
1318 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
1319
1320public:
1321 ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1322 AbstractGangTask("Parallel Region Task"),
1323 _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1324
1325 void work(uint worker_id) {
1326 size_t stride = ShenandoahParallelRegionStride;
1327
1328 size_t max = _heap->num_regions();
1329 while (_index < max) {
1330 size_t cur = Atomic::add(stride, &_index) - stride;
1331 size_t start = cur;
1332 size_t end = MIN2(cur + stride, max);
1333 if (start >= max) break;
1334
1335 for (size_t i = cur; i < end; i++) {
1336 ShenandoahHeapRegion* current = _heap->get_region(i);
1337 _blk->heap_region_do(current);
1338 }
1339 }
1340 }
1341};
1342
1343void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1344 assert(blk->is_thread_safe(), "Only thread-safe closures here");
1345 if (num_regions() > ShenandoahParallelRegionStride) {
1346 ShenandoahParallelHeapRegionTask task(blk);
1347 workers()->run_task(&task);
1348 } else {
1349 heap_region_iterate(blk);
1350 }
1351}
1352
1353class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1354private:
1355 ShenandoahMarkingContext* const _ctx;
1356public:
1357 ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1358
1359 void heap_region_do(ShenandoahHeapRegion* r) {
1360 if (r->is_active()) {
1361 r->clear_live_data();
1362 _ctx->capture_top_at_mark_start(r);
1363 } else {
1364 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
1365 assert(_ctx->top_at_mark_start(r) == r->top(),
1366 "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number());
1367 }
1368 }
1369
1370 bool is_thread_safe() { return true; }
1371};
1372
1373void ShenandoahHeap::op_init_mark() {
1374 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1375 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1376
1377 assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1378 assert(!marking_context()->is_complete(), "should not be complete");
1379
1380 if (ShenandoahVerify) {
1381 verifier()->verify_before_concmark();
1382 }
1383
1384 if (VerifyBeforeGC) {
1385 Universe::verify();
1386 }
1387
1388 set_concurrent_mark_in_progress(true);
1389 // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1390 {
1391 ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1392 make_parsable(true);
1393 }
1394
1395 {
1396 ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1397 ShenandoahClearLivenessClosure clc;
1398 parallel_heap_region_iterate(&clc);
1399 }
1400
1401 // Make above changes visible to worker threads
1402 OrderAccess::fence();
1403
1404 concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1405
1406 if (UseTLAB) {
1407 ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1408 resize_tlabs();
1409 }
1410
1411 if (ShenandoahPacing) {
1412 pacer()->setup_for_mark();
1413 }
1414}
1415
1416void ShenandoahHeap::op_mark() {
1417 concurrent_mark()->mark_from_roots();
1418}
1419
1420class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure {
1421private:
1422 ShenandoahMarkingContext* const _ctx;
1423public:
1424 ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
1425
1426 void heap_region_do(ShenandoahHeapRegion* r) {
1427 if (r->is_active()) {
1428 HeapWord *tams = _ctx->top_at_mark_start(r);
1429 HeapWord *top = r->top();
1430 if (top > tams) {
1431 r->increase_live_data_alloc_words(pointer_delta(top, tams));
1432 }
1433 } else {
1434 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
1435 assert(_ctx->top_at_mark_start(r) == r->top(),
1436 "Region " SIZE_FORMAT " should have correct TAMS", r->region_number());
1437 }
1438 }
1439
1440 bool is_thread_safe() { return true; }
1441};
1442
1443void ShenandoahHeap::op_final_mark() {
1444 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1445
  // It is critical that we evacuate roots right after finishing marking,
  // so that we don't get unmarked objects in the roots.
1449
1450 if (!cancelled_gc()) {
1451 concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1452
1453 if (has_forwarded_objects()) {
1454 // Degen may be caused by failed evacuation of roots
1455 if (is_degenerated_gc_in_progress()) {
1456 concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
1457 } else {
1458 concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::update_roots);
1459 }
1460 }
1461
1462 if (ShenandoahVerify) {
1463 verifier()->verify_roots_no_forwarded();
1464 }
1465
1466 stop_concurrent_marking();
1467
1468 {
1469 ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1470
1471 // All allocations past TAMS are implicitly live, adjust the region data.
1472 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1473 ShenandoahCompleteLivenessClosure cl;
1474 parallel_heap_region_iterate(&cl);
1475 }
1476
1477 {
1478 ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1479
1480 make_parsable(true);
1481
1482 trash_cset_regions();
1483
1484 {
1485 ShenandoahHeapLocker locker(lock());
1486 _collection_set->clear();
1487 _free_set->clear();
1488
1489 heuristics()->choose_collection_set(_collection_set);
1490
1491 _free_set->rebuild();
1492 }
1493 }
1494
1495 // If collection set has candidates, start evacuation.
1496 // Otherwise, bypass the rest of the cycle.
1497 if (!collection_set()->is_empty()) {
1498 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1499
1500 if (ShenandoahVerify) {
1501 verifier()->verify_before_evacuation();
1502 }
1503
1504 set_evacuation_in_progress(true);
1505 // From here on, we need to update references.
1506 set_has_forwarded_objects(true);
1507
1508 evacuate_and_update_roots();
1509
1510 if (ShenandoahPacing) {
1511 pacer()->setup_for_evac();
1512 }
1513
1514 if (ShenandoahVerify) {
1515 verifier()->verify_roots_no_forwarded();
1516 verifier()->verify_during_evacuation();
1517 }
1518 } else {
1519 if (ShenandoahVerify) {
1520 verifier()->verify_after_concmark();
1521 }
1522
1523 if (VerifyAfterGC) {
1524 Universe::verify();
1525 }
1526 }
1527
1528 } else {
1529 concurrent_mark()->cancel();
1530 stop_concurrent_marking();
1531
1532 if (process_references()) {
1533 // Abandon reference processing right away: pre-cleaning must have failed.
1534 ReferenceProcessor *rp = ref_processor();
1535 rp->disable_discovery();
1536 rp->abandon_partial_discovery();
1537 rp->verify_no_references_recorded();
1538 }
1539 }
1540}
1541
1542void ShenandoahHeap::op_final_evac() {
1543 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1544
1545 set_evacuation_in_progress(false);
1546
1547 retire_and_reset_gclabs();
1548
1549 if (ShenandoahVerify) {
1550 verifier()->verify_after_evacuation();
1551 }
1552
1553 if (VerifyAfterGC) {
1554 Universe::verify();
1555 }
1556}
1557
1558void ShenandoahHeap::op_conc_evac() {
1559 ShenandoahEvacuationTask task(this, _collection_set, true);
1560 workers()->run_task(&task);
1561}
1562
1563void ShenandoahHeap::op_stw_evac() {
1564 ShenandoahEvacuationTask task(this, _collection_set, false);
1565 workers()->run_task(&task);
1566}
1567
1568void ShenandoahHeap::op_updaterefs() {
1569 update_heap_references(true);
1570}
1571
1572void ShenandoahHeap::op_cleanup() {
1573 free_set()->recycle_trash();
1574}
1575
1576void ShenandoahHeap::op_reset() {
1577 reset_mark_bitmap();
1578}
1579
1580void ShenandoahHeap::op_preclean() {
1581 concurrent_mark()->preclean_weak_refs();
1582}
1583
1584void ShenandoahHeap::op_init_traversal() {
1585 traversal_gc()->init_traversal_collection();
1586}
1587
1588void ShenandoahHeap::op_traversal() {
1589 traversal_gc()->concurrent_traversal_collection();
1590}
1591
1592void ShenandoahHeap::op_final_traversal() {
1593 traversal_gc()->final_traversal_collection();
1594}
1595
1596void ShenandoahHeap::op_full(GCCause::Cause cause) {
1597 ShenandoahMetricsSnapshot metrics;
1598 metrics.snap_before();
1599
1600 full_gc()->do_it(cause);
1601 if (UseTLAB) {
1602 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1603 resize_all_tlabs();
1604 }
1605
1606 metrics.snap_after();
1607
1608 if (metrics.is_good_progress()) {
1609 _progress_last_gc.set();
1610 } else {
1611 // Nothing to do. Tell the allocation path that we have failed to make
1612 // progress, and it can finally fail.
1613 _progress_last_gc.unset();
1614 }
1615}
1616
void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.

  clear_cancelled_gc();
1623
1624 ShenandoahMetricsSnapshot metrics;
1625 metrics.snap_before();
1626
1627 switch (point) {
1628 case _degenerated_traversal:
1629 {
1630 // Drop the collection set. Note: this leaves some already forwarded objects
1631 // behind, which may be problematic; see the comments on the ShenandoahEvacAssist
1632 // workarounds in ShenandoahTraversalHeuristics.
1633
1634 ShenandoahHeapLocker locker(lock());
1635 collection_set()->clear_current_index();
1636 for (size_t i = 0; i < collection_set()->count(); i++) {
1637 ShenandoahHeapRegion* r = collection_set()->next();
1638 r->make_regular_bypass();
1639 }
1640 collection_set()->clear();
1641 }
1642 op_final_traversal();
1643 op_cleanup();
1644 return;
1645
1646 // The cases below form the Duff's-like device: it describes the actual GC cycle,
1647 // but enters it at different points, depending on which concurrent phase had
1648 // degenerated.
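  // For example, a cycle entering at _degenerated_mark runs op_final_mark() and
  // op_cleanup(), then falls through the _degenerated_evac and _degenerated_updaterefs
  // cases, so the rest of the cycle still completes (or fails and upgrades to Full GC).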
1649
1650 case _degenerated_outside_cycle:
1651 // We have degenerated from outside the cycle, which means something is bad with
1652 // the heap, most probably heavy humongous fragmentation, or we are very low on free
1653 // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1654 // we can do the most aggressive degen cycle, which includes processing references and
1655 // class unloading, unless those features are explicitly disabled.
1656 //
1657 // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1658 // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1659 set_process_references(heuristics()->can_process_references());
1660 set_unload_classes(heuristics()->can_unload_classes());
1661
1662 if (heuristics()->can_do_traversal_gc()) {
1663 // Not possible to degenerate from here, upgrade to Full GC right away.
1664 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1665 op_degenerated_fail();
1666 return;
1667 }
1668
1669 op_reset();
1670
1671 op_init_mark();
1672 if (cancelled_gc()) {
1673 op_degenerated_fail();
1674 return;
1675 }
1676
1677 case _degenerated_mark:
1678 op_final_mark();
1679 if (cancelled_gc()) {
1680 op_degenerated_fail();
1681 return;
1682 }
1683
1684 op_cleanup();
1685
1686 case _degenerated_evac:
1687 // If heuristics thinks we should do the cycle, this flag would be set,
1688 // and we can do evacuation. Otherwise, it would be the shortcut cycle.
1689 if (is_evacuation_in_progress()) {
1690
1691 // Degeneration under oom-evac protocol might have left some objects in
1692 // collection set un-evacuated. Restart evacuation from the beginning to
1693 // capture all objects. For all the objects that are already evacuated,
1694 // it would be a simple check, which is supposed to be fast. This is also
1695 // safe to do even without degeneration, as the CSet iterator is at the beginning
1696 // in preparation for evacuation anyway.
1697 //
1698 // Before doing that, we need to make sure we never had any cset-pinned
1699 // regions. This may happen if an allocation failure occurred while evacuating
1700 // the about-to-be-pinned object, the oom-evac protocol left the object in
1701 // the collection set, and then the pin reached the cset region. If we continue
1702 // the cycle here, we would trash the cset and alive objects in it. To avoid
1703 // it, we fail degeneration right away and slide into Full GC to recover.
1704
1705 {
1706 collection_set()->clear_current_index();
1707
1708 ShenandoahHeapRegion* r;
1709 while ((r = collection_set()->next()) != NULL) {
1710 if (r->is_pinned()) {
1711 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1712 op_degenerated_fail();
1713 return;
1714 }
1715 }
1716
1717 collection_set()->clear_current_index();
1718 }
1719
1720 op_stw_evac();
1721 if (cancelled_gc()) {
1722 op_degenerated_fail();
1723 return;
1724 }
1725 }
1726
1727 // If heuristics thinks we should do the cycle, this flag would be set,
1728 // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
1729 if (has_forwarded_objects()) {
1730 op_init_updaterefs();
1731 if (cancelled_gc()) {
1732 op_degenerated_fail();
1733 return;
1734 }
1735 }
1736
1737 case _degenerated_updaterefs:
1738 if (has_forwarded_objects()) {
1739 op_final_updaterefs();
1740 if (cancelled_gc()) {
1741 op_degenerated_fail();
1742 return;
1743 }
1744 }
1745
1746 op_cleanup();
1747 break;
1748
1749 default:
1750 ShouldNotReachHere();
1751 }
1752
1753 if (ShenandoahVerify) {
1754 verifier()->verify_after_degenerated();
1755 }
1756
1757 if (VerifyAfterGC) {
1758 Universe::verify();
1759 }
1760
1761 metrics.snap_after();
1762
1763 // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1764 // because that probably means the heap is overloaded and/or fragmented.
1765 if (!metrics.is_good_progress()) {
1766 _progress_last_gc.unset();
1767 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1768 op_degenerated_futile();
1769 } else {
1770 _progress_last_gc.set();
1771 }
1772}
1773
1774void ShenandoahHeap::op_degenerated_fail() {
1775 log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1776 shenandoah_policy()->record_degenerated_upgrade_to_full();
1777 op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1778}
1779
1780void ShenandoahHeap::op_degenerated_futile() {
1781 shenandoah_policy()->record_degenerated_upgrade_to_full();
1782 op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1783}
1784
1785void ShenandoahHeap::stop_concurrent_marking() {
1786 assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1787 set_concurrent_mark_in_progress(false);
1788 if (!cancelled_gc()) {
1789 // Marking has completed without cancellation: there are no forwarded objects
1790 // left to update, and the marking context is now complete.
1791 set_has_forwarded_objects(false);
1792 mark_complete_marking_context();
1793 }
1794}
1795
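// Ask every Java thread to flush its SATB buffer by setting a per-thread flag.
// This only matters while concurrent marking or traversal is in progress; otherwise
// there is nothing to flush (see the early return below). Where exactly a thread
// notices the flag is up to the barrier/runtime code, not this method.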
1796void ShenandoahHeap::force_satb_flush_all_threads() {
1797 if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
1798 // No need to flush SATBs
1799 return;
1800 }
1801
1802 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1803 ShenandoahThreadLocalData::set_force_satb_flush(t, true);
1804 }
1805 // The threads are not "acquiring" their thread-local data, but it does not
1806 // hurt to "release" the updates here anyway.
1807 OrderAccess::fence();
1808}
1809
1810void ShenandoahHeap::set_gc_state_all_threads(char state) {
1811 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1812 ShenandoahThreadLocalData::set_gc_state(t, state);
1813 }
1814}
1815
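// The collector-wide GC state is a small bitmask (MARKING, EVACUATION, UPDATEREFS,
// TRAVERSAL, HAS_FORWARDED). It is only changed at Shenandoah safepoints and is then
// copied into every Java thread's thread-local data, presumably so that barrier fast
// paths can read it without loading the shared word.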
1816void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1817 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1818 _gc_state.set_cond(mask, value);
1819 set_gc_state_all_threads(_gc_state.raw_value());
1820}
1821
1822void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1823 if (has_forwarded_objects()) {
1824 set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1825 } else {
1826 set_gc_state_mask(MARKING, in_progress);
1827 }
1828 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1829}
1830
1831void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1832 set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1833 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1834}
1835
1836void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1837 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1838 set_gc_state_mask(EVACUATION, in_progress);
1839}
1840
1841void ShenandoahHeap::ref_processing_init() {
1842 assert(_max_workers > 0, "Sanity");
1843
1844 _ref_processor =
1845 new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery
1846 ParallelRefProcEnabled, // MT processing
1847 _max_workers, // Degree of MT processing
1848 true, // MT discovery
1849 _max_workers, // Degree of MT discovery
1850 false, // Reference discovery is not atomic
1851 NULL, // No closure, should be installed before use
1852 true); // Scale worker threads
1853
1854 shenandoah_assert_rp_isalive_not_installed();
1855}
1856
1857GCTracer* ShenandoahHeap::tracer() {
1858 return shenandoah_policy()->tracer();
1859}
1860
1861size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1862 return _free_set->used();
1863}
1864
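// Attempts to move _cancelled_gc from CANCELLABLE to CANCELLED with a CAS, returning
// true only for the thread that performed the transition. Seeing CANCELLED means some
// other thread already cancelled; seeing NOT_CANCELLED (only expected with suspendible
// workers) means we must retry, pausing inside ThreadBlockInVM so that a pending
// safepoint can make progress instead of us spinning against it.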
1865bool ShenandoahHeap::try_cancel_gc() {
1866 while (true) {
1867 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1868 if (prev == CANCELLABLE) return true;
1869 else if (prev == CANCELLED) return false;
1870 assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1871 assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1872 {
1873 // We need to provide a safepoint here, otherwise we might
1874 // spin forever if a safepoint is pending.
1875 ThreadBlockInVM sp(JavaThread::current());
1876 SpinPause();
1877 }
1878 }
1879}
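/*
 * Minimal illustrative sketch of the cancellation hand-off above, using std::atomic
 * instead of the VM's primitives. This is an analogy only, not part of the VM sources;
 * the names (State, state, try_cancel) are hypothetical.
 *
 *   #include <atomic>
 *   enum State { CANCELLABLE, CANCELLED, NOT_CANCELLED };
 *   std::atomic<int> state{CANCELLABLE};
 *
 *   bool try_cancel() {
 *     int expected = CANCELLABLE;
 *     while (!state.compare_exchange_strong(expected, CANCELLED)) {
 *       if (expected == CANCELLED) return false;  // somebody else cancelled first
 *       expected = CANCELLABLE;                   // saw NOT_CANCELLED: retry
 *     }                                           // (the VM also yields to safepoints here)
 *     return true;                                // this thread performed the cancel
 *   }
 */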
1880
1881void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1882 if (try_cancel_gc()) {
1883 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1884 log_info(gc)("%s", msg.buffer());
1885 Events::log(Thread::current(), "%s", msg.buffer());
1886 }
1887}
1888
1889uint ShenandoahHeap::max_workers() {
1890 return _max_workers;
1891}
1892
1893void ShenandoahHeap::stop() {
1894 // The shutdown sequence should be able to terminate when GC is running.
1895
1896 // Step 0. Notify policy to disable event recording.
1897 _shenandoah_policy->record_shutdown();
1898
1899 // Step 1. Notify control thread that we are in shutdown.
1900 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1901 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1902 control_thread()->prepare_for_graceful_shutdown();
1903
1904 // Step 2. Notify GC workers that we are cancelling GC.
1905 cancel_gc(GCCause::_shenandoah_stop_vm);
1906
1907 // Step 3. Wait until GC worker exits normally.
1908 control_thread()->stop();
1909
1910 // Step 4. Stop String Dedup thread if it is active
1911 if (ShenandoahStringDedup::is_enabled()) {
1912 ShenandoahStringDedup::stop();
1913 }
1914}
1915
1916void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1917 assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
1918
1919 ShenandoahGCPhase root_phase(full_gc ?
1920 ShenandoahPhaseTimings::full_gc_purge :
1921 ShenandoahPhaseTimings::purge);
1922
1923 ShenandoahIsAliveSelector alive;
1924 BoolObjectClosure* is_alive = alive.is_alive_closure();
1925
1926 bool purged_class;
1927
1928 // Unload classes and purge SystemDictionary.
1929 {
1930 ShenandoahGCPhase phase(full_gc ?
1931 ShenandoahPhaseTimings::full_gc_purge_class_unload :
1932 ShenandoahPhaseTimings::purge_class_unload);
1933 purged_class = SystemDictionary::do_unloading(gc_timer());
1934 }
1935
1936 {
1937 ShenandoahGCPhase phase(full_gc ?
1938 ShenandoahPhaseTimings::full_gc_purge_par :
1939 ShenandoahPhaseTimings::purge_par);
1940 uint active = _workers->active_workers();
1941 ParallelCleaningTask unlink_task(is_alive, active, purged_class, true);
1942 _workers->run_task(&unlink_task);
1943 }
1944
1945 {
1946 ShenandoahGCPhase phase(full_gc ?
1947 ShenandoahPhaseTimings::full_gc_purge_cldg :
1948 ShenandoahPhaseTimings::purge_cldg);
1949 ClassLoaderDataGraph::purge();
1950 }
1951}
1952
1953void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1954 set_gc_state_mask(HAS_FORWARDED, cond);
1955}
1956
1957void ShenandoahHeap::set_process_references(bool pr) {
1958 _process_references.set_cond(pr);
1959}
1960
1961void ShenandoahHeap::set_unload_classes(bool uc) {
1962 _unload_classes.set_cond(uc);
1963}
1964
1965bool ShenandoahHeap::process_references() const {
1966 return _process_references.is_set();
1967}
1968
1969bool ShenandoahHeap::unload_classes() const {
1970 return _unload_classes.is_set();
1971}
1972
1973address ShenandoahHeap::in_cset_fast_test_addr() {
1974 ShenandoahHeap* heap = ShenandoahHeap::heap();
1975 assert(heap->collection_set() != NULL, "Sanity");
1976 return (address) heap->collection_set()->biased_map_address();
1977}
1978
1979address ShenandoahHeap::cancelled_gc_addr() {
1980 return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1981}
1982
1983address ShenandoahHeap::gc_state_addr() {
1984 return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1985}
1986
1987size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1988 return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
1989}
1990
1991void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1992 OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
1993}
1994
1995void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1996 _degenerated_gc_in_progress.set_cond(in_progress);
1997}
1998
1999void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2000 _full_gc_in_progress.set_cond(in_progress);
2001}
2002
2003void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2004 assert (is_full_gc_in_progress(), "should be");
2005 _full_gc_move_in_progress.set_cond(in_progress);
2006}
2007
2008void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2009 set_gc_state_mask(UPDATEREFS, in_progress);
2010}
2011
2012void ShenandoahHeap::register_nmethod(nmethod* nm) {
2013 ShenandoahCodeRoots::add_nmethod(nm);
2014}
2015
2016void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2017 ShenandoahCodeRoots::remove_nmethod(nm);
2018}
2019
2020oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2021 ShenandoahHeapLocker locker(lock());
2022 heap_region_containing(o)->make_pinned();
2023 return o;
2024}
2025
2026void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2027 ShenandoahHeapLocker locker(lock());
2028 heap_region_containing(o)->make_unpinned();
2029}
2030
2031GCTimer* ShenandoahHeap::gc_timer() const {
2032 return _gc_timer;
2033}
2034
2035#ifdef ASSERT
2036void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2037 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2038
2039 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2040 if (UseDynamicNumberOfGCThreads ||
2041 (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2042 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2043 } else {
2044 // Use ParallelGCThreads inside safepoints
2045 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2046 }
2047 } else {
2048 if (UseDynamicNumberOfGCThreads ||
2049 (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2050 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2051 } else {
2052 // Use ConcGCThreads outside safepoints
2053 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2054 }
2055 }
2056}
2057#endif
2058
2059ShenandoahVerifier* ShenandoahHeap::verifier() {
2060 guarantee(ShenandoahVerify, "Should be enabled");
2061 assert (_verifier != NULL, "sanity");
2062 return _verifier;
2063}
2064
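// Parallel task for the update-references phase: workers pull regions from the shared
// ShenandoahRegionIterator and update references in the live objects of every active,
// non-collection-set region, up to that region's concurrent-iteration safe limit.
// Progress is reported to the pacer, and workers bail out once the GC is cancelled.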
2065template<class T>
2066class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2067private:
2068 T cl;
2069 ShenandoahHeap* _heap;
2070 ShenandoahRegionIterator* _regions;
2071 bool _concurrent;
2072public:
2073 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2074 AbstractGangTask("Concurrent Update References Task"),
2075 cl(T()),
2076 _heap(ShenandoahHeap::heap()),
2077 _regions(regions),
2078 _concurrent(concurrent) {
2079 }
2080
2081 void work(uint worker_id) {
2082 if (_concurrent) {
2083 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2084 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2085 do_work();
2086 } else {
2087 ShenandoahParallelWorkerSession worker_session(worker_id);
2088 do_work();
2089 }
2090 }
2091
2092private:
2093 void do_work() {
2094 ShenandoahHeapRegion* r = _regions->next();
2095 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2096 while (r != NULL) {
2097 HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2098 assert (top_at_start_ur >= r->bottom(), "sanity");
2099 if (r->is_active() && !r->is_cset()) {
2100 _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
2101 }
2102 if (ShenandoahPacing) {
2103 _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2104 }
2105 if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2106 return;
2107 }
2108 r = _regions->next();
2109 }
2110 }
2111};
2112
2113void ShenandoahHeap::update_heap_references(bool concurrent) {
2114 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2115 workers()->run_task(&task);
2116}
2117
2118void ShenandoahHeap::op_init_updaterefs() {
2119 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2120
2121 set_evacuation_in_progress(false);
2122
2123 retire_and_reset_gclabs();
2124
2125 if (ShenandoahVerify) {
2126 if (!is_degenerated_gc_in_progress()) {
2127 verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2128 }
2129 verifier()->verify_before_updaterefs();
2130 }
2131
2132 set_update_refs_in_progress(true);
2133 make_parsable(true);
2134 for (uint i = 0; i < num_regions(); i++) {
2135 ShenandoahHeapRegion* r = get_region(i);
2136 r->set_concurrent_iteration_safe_limit(r->top());
2137 }
2138
2139 // Reset iterator.
2140 _update_refs_iterator.reset();
2141
2142 if (ShenandoahPacing) {
2143 pacer()->setup_for_updaterefs();
2144 }
2145}
2146
2147void ShenandoahHeap::op_final_updaterefs() {
2148 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2149
2150 // Check if there is left-over work, and finish it
2151 if (_update_refs_iterator.has_next()) {
2152 ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2153
2154 // Finish updating references where we left off.
2155 clear_cancelled_gc();
2156 update_heap_references(false);
2157 }
2158
2159 // Clear cancelled GC, if set. On cancellation path, the block before would handle
2160 // everything. On degenerated paths, cancelled gc would not be set anyway.
2161 if (cancelled_gc()) {
2162 clear_cancelled_gc();
2163 }
2164 assert(!cancelled_gc(), "Should have been done right before");
2165
2166 if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2167 verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
2168 }
2169
2170 if (is_degenerated_gc_in_progress()) {
2171 concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2172 } else {
2173 concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2174 }
2175
2176 ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2177
2178 trash_cset_regions();
2179 set_has_forwarded_objects(false);
2180 set_update_refs_in_progress(false);
2181
2182 if (ShenandoahVerify) {
2183 verifier()->verify_roots_no_forwarded();
2184 verifier()->verify_after_updaterefs();
2185 }
2186
2187 if (VerifyAfterGC) {
2188 Universe::verify();
2189 }
2190
2191 {
2192 ShenandoahHeapLocker locker(lock());
2193 _free_set->rebuild();
2194 }
2195}
2196
2197#ifdef ASSERT
2198void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2199 _lock.assert_owned_by_current_thread();
2200}
2201
2202void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2203 _lock.assert_not_owned_by_current_thread();
2204}
2205
2206void ShenandoahHeap::assert_heaplock_or_safepoint() {
2207 _lock.assert_owned_by_current_thread_or_safepoint();
2208}
2209#endif
2210
2211void ShenandoahHeap::print_extended_on(outputStream *st) const {
2212 print_on(st);
2213 print_heap_regions_on(st);
2214}
2215
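// Mark-bitmap memory is committed in slices, each covering _bitmap_regions_per_slice
// heap regions. A slice can only be committed or uncommitted when the committed state
// of the other regions sharing that slice allows it, which is what the helpers below check.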
2216bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2217 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2218
2219 size_t regions_from = _bitmap_regions_per_slice * slice;
2220 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2221 for (size_t g = regions_from; g < regions_to; g++) {
2222 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2223 if (skip_self && g == r->region_number()) continue;
2224 if (get_region(g)->is_committed()) {
2225 return true;
2226 }
2227 }
2228 return false;
2229}
2230
2231bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2232 assert_heaplock_owned_by_current_thread();
2233
2234 // Bitmaps in special regions do not need commits
2235 if (_bitmap_region_special) {
2236 return true;
2237 }
2238
2239 if (is_bitmap_slice_committed(r, true)) {
2240 // Some other region from the group is already committed, meaning the bitmap
2241 // slice is already committed; we exit right away.
2242 return true;
2243 }
2244
2245 // Commit the bitmap slice:
2246 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2247 size_t off = _bitmap_bytes_per_slice * slice;
2248 size_t len = _bitmap_bytes_per_slice;
2249 if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2250 return false;
2251 }
2252 return true;
2253}
2254
2255bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2256 assert_heaplock_owned_by_current_thread();
2257
2258 // Bitmaps in special regions do not need uncommits
2259 if (_bitmap_region_special) {
2260 return true;
2261 }
2262
2263 if (is_bitmap_slice_committed(r, true)) {
2264 // Some other region from the group is still committed, meaning the bitmap
2265 // slice should stay committed; we exit right away.
2266 return true;
2267 }
2268
2269 // Uncommit the bitmap slice:
2270 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2271 size_t off = _bitmap_bytes_per_slice * slice;
2272 size_t len = _bitmap_bytes_per_slice;
2273 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2274 return false;
2275 }
2276 return true;
2277}
2278
2279void ShenandoahHeap::safepoint_synchronize_begin() {
2280 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2281 SuspendibleThreadSet::synchronize();
2282 }
2283}
2284
2285void ShenandoahHeap::safepoint_synchronize_end() {
2286 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2287 SuspendibleThreadSet::desynchronize();
2288 }
2289}
2290
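// The vmop_entry_* methods wrap each stop-the-world phase: they record the "gross"
// pause (which includes the time to reach the safepoint), optionally inject an
// allocation failure for testing, and then execute the VM operation that calls back
// into the matching entry_* method at a safepoint.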
2291void ShenandoahHeap::vmop_entry_init_mark() {
2292 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2293 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2294 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2295
2296 try_inject_alloc_failure();
2297 VM_ShenandoahInitMark op;
2298 VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2299}
2300
2301void ShenandoahHeap::vmop_entry_final_mark() {
2302 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2303 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2304 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2305
2306 try_inject_alloc_failure();
2307 VM_ShenandoahFinalMarkStartEvac op;
2308 VMThread::execute(&op); // jump to entry_final_mark under safepoint
2309}
2310
2311void ShenandoahHeap::vmop_entry_final_evac() {
2312 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2313 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2314 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2315
2316 VM_ShenandoahFinalEvac op;
2317 VMThread::execute(&op); // jump to entry_final_evac under safepoint
2318}
2319
2320void ShenandoahHeap::vmop_entry_init_updaterefs() {
2321 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2322 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2323 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2324
2325 try_inject_alloc_failure();
2326 VM_ShenandoahInitUpdateRefs op;
2327 VMThread::execute(&op);
2328}
2329
2330void ShenandoahHeap::vmop_entry_final_updaterefs() {
2331 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2332 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2333 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2334
2335 try_inject_alloc_failure();
2336 VM_ShenandoahFinalUpdateRefs op;
2337 VMThread::execute(&op);
2338}
2339
2340void ShenandoahHeap::vmop_entry_init_traversal() {
2341 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2342 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2343 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2344
2345 try_inject_alloc_failure();
2346 VM_ShenandoahInitTraversalGC op;
2347 VMThread::execute(&op);
2348}
2349
2350void ShenandoahHeap::vmop_entry_final_traversal() {
2351 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2352 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2353 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2354
2355 try_inject_alloc_failure();
2356 VM_ShenandoahFinalTraversalGC op;
2357 VMThread::execute(&op);
2358}
2359
2360void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2361 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2362 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2363 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2364
2365 try_inject_alloc_failure();
2366 VM_ShenandoahFullGC op(cause);
2367 VMThread::execute(&op);
2368}
2369
2370void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2371 TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2372 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2373 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2374
2375 VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2376 VMThread::execute(&degenerated_gc);
2377}
2378
2379void ShenandoahHeap::entry_init_mark() {
2380 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2381 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2382 const char* msg = init_mark_event_message();
2383 GCTraceTime(Info, gc) time(msg, gc_timer());
2384 EventMark em("%s", msg);
2385
2386 ShenandoahWorkerScope scope(workers(),
2387 ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2388 "init marking");
2389
2390 op_init_mark();
2391}
2392
2393void ShenandoahHeap::entry_final_mark() {
2394 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2395 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2396 const char* msg = final_mark_event_message();
2397 GCTraceTime(Info, gc) time(msg, gc_timer());
2398 EventMark em("%s", msg);
2399
2400 ShenandoahWorkerScope scope(workers(),
2401 ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2402 "final marking");
2403
2404 op_final_mark();
2405}
2406
2407void ShenandoahHeap::entry_final_evac() {
2408 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2409 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2410 static const char* msg = "Pause Final Evac";
2411 GCTraceTime(Info, gc) time(msg, gc_timer());
2412 EventMark em("%s", msg);
2413
2414 op_final_evac();
2415}
2416
2417void ShenandoahHeap::entry_init_updaterefs() {
2418 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2419 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2420
2421 static const char* msg = "Pause Init Update Refs";
2422 GCTraceTime(Info, gc) time(msg, gc_timer());
2423 EventMark em("%s", msg);
2424
2425 // No workers used in this phase, no setup required
2426
2427 op_init_updaterefs();
2428}
2429
2430void ShenandoahHeap::entry_final_updaterefs() {
2431 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2432 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2433
2434 static const char* msg = "Pause Final Update Refs";
2435 GCTraceTime(Info, gc) time(msg, gc_timer());
2436 EventMark em("%s", msg);
2437
2438 ShenandoahWorkerScope scope(workers(),
2439 ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2440 "final reference update");
2441
2442 op_final_updaterefs();
2443}
2444
2445void ShenandoahHeap::entry_init_traversal() {
2446 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2447 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2448
2449 static const char* msg = "Pause Init Traversal";
2450 GCTraceTime(Info, gc) time(msg, gc_timer());
2451 EventMark em("%s", msg);
2452
2453 ShenandoahWorkerScope scope(workers(),
2454 ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2455 "init traversal");
2456
2457 op_init_traversal();
2458}
2459
2460void ShenandoahHeap::entry_final_traversal() {
2461 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2462 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2463
2464 static const char* msg = "Pause Final Traversal";
2465 GCTraceTime(Info, gc) time(msg, gc_timer());
2466 EventMark em("%s", msg);
2467
2468 ShenandoahWorkerScope scope(workers(),
2469 ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2470 "final traversal");
2471
2472 op_final_traversal();
2473}
2474
2475void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2476 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2477 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2478
2479 static const char* msg = "Pause Full";
2480 GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2481 EventMark em("%s", msg);
2482
2483 ShenandoahWorkerScope scope(workers(),
2484 ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2485 "full gc");
2486
2487 op_full(cause);
2488}
2489
2490void ShenandoahHeap::entry_degenerated(int point) {
2491 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2492 ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2493
2494 ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2495 const char* msg = degen_event_message(dpoint);
2496 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2497 EventMark em("%s", msg);
2498
2499 ShenandoahWorkerScope scope(workers(),
2500 ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2501 "stw degenerated gc");
2502
2503 set_degenerated_gc_in_progress(true);
2504 op_degenerated(dpoint);
2505 set_degenerated_gc_in_progress(false);
2506}
2507
2508void ShenandoahHeap::entry_mark() {
2509 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2510
2511 const char* msg = conc_mark_event_message();
2512 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2513 EventMark em("%s", msg);
2514
2515 ShenandoahWorkerScope scope(workers(),
2516 ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2517 "concurrent marking");
2518
2519 try_inject_alloc_failure();
2520 op_mark();
2521}
2522
2523void ShenandoahHeap::entry_evac() {
2524 ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2525 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2526
2527 static const char* msg = "Concurrent evacuation";
2528 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2529 EventMark em("%s", msg);
2530
2531 ShenandoahWorkerScope scope(workers(),
2532 ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2533 "concurrent evacuation");
2534
2535 try_inject_alloc_failure();
2536 op_conc_evac();
2537}
2538
2539void ShenandoahHeap::entry_updaterefs() {
2540 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2541
2542 static const char* msg = "Concurrent update references";
2543 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2544 EventMark em("%s", msg);
2545
2546 ShenandoahWorkerScope scope(workers(),
2547 ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2548 "concurrent reference update");
2549
2550 try_inject_alloc_failure();
2551 op_updaterefs();
2552}

2553void ShenandoahHeap::entry_cleanup() {
2554 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2555
2556 static const char* msg = "Concurrent cleanup";
2557 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2558 EventMark em("%s", msg);
2559
2560 // This phase does not use workers, no need for setup
2561
2562 try_inject_alloc_failure();
2563 op_cleanup();
2564}
2565
2566void ShenandoahHeap::entry_reset() {
2567 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2568
2569 static const char* msg = "Concurrent reset";
2570 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2571 EventMark em("%s", msg);
2572
2573 ShenandoahWorkerScope scope(workers(),
2574 ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2575 "concurrent reset");
2576
2577 try_inject_alloc_failure();
2578 op_reset();
2579}
2580
2581void ShenandoahHeap::entry_preclean() {
2582 if (ShenandoahPreclean && process_references()) {
2583 static const char* msg = "Concurrent precleaning";
2584 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2585 EventMark em("%s", msg);
2586
2587 ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2588
2589 ShenandoahWorkerScope scope(workers(),
2590 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2591 "concurrent preclean",
2592 /* check_workers = */ false);
2593
2594 try_inject_alloc_failure();
2595 op_preclean();
2596 }
2597}
2598
2599void ShenandoahHeap::entry_traversal() {
2600 static const char* msg = "Concurrent traversal";
2601 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2602 EventMark em("%s", msg);
2603
2604 TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2605
2606 ShenandoahWorkerScope scope(workers(),
2607 ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
2608 "concurrent traversal");
2609
2610 try_inject_alloc_failure();
2611 op_traversal();
2612}
2613
2614void ShenandoahHeap::entry_uncommit(double shrink_before) {
2615 static const char *msg = "Concurrent uncommit";
2616 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2617 EventMark em("%s", msg);
2618
2619 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2620
2621 op_uncommit(shrink_before);
2622}
2623
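// Test-only hook: when ShenandoahAllocFailureALot is enabled, each call has roughly a
// 4.9% chance (os::random() % 1000 landing in 951..999) of raising the injection flag,
// which should_inject_alloc_failure() exposes (presumably to the allocation slow path)
// so a failure can be simulated; if that leads to a cancelled GC within the short sleep
// below, the injection is logged as successful.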
2624void ShenandoahHeap::try_inject_alloc_failure() {
2625 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2626 _inject_alloc_failure.set();
2627 os::naked_short_sleep(1);
2628 if (cancelled_gc()) {
2629 log_info(gc)("Allocation failure was successfully injected");
2630 }
2631 }
2632}
2633
2634bool ShenandoahHeap::should_inject_alloc_failure() {
2635 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2636}
2637
2638void ShenandoahHeap::initialize_serviceability() {
2639 _memory_pool = new ShenandoahMemoryPool(this);
2640 _cycle_memory_manager.add_pool(_memory_pool);
2641 _stw_memory_manager.add_pool(_memory_pool);
2642}
2643
2644GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2645 GrowableArray<GCMemoryManager*> memory_managers(2);
2646 memory_managers.append(&_cycle_memory_manager);
2647 memory_managers.append(&_stw_memory_manager);
2648 return memory_managers;
2649}
2650
2651GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2652 GrowableArray<MemoryPool*> memory_pools(1);
2653 memory_pools.append(_memory_pool);
2654 return memory_pools;
2655}
2656
2657MemoryUsage ShenandoahHeap::memory_usage() {
2658 return _memory_pool->get_memory_usage();
2659}
2660
2661void ShenandoahHeap::enter_evacuation() {
2662 _oom_evac_handler.enter_evacuation();
2663}
2664
2665void ShenandoahHeap::leave_evacuation() {
2666 _oom_evac_handler.leave_evacuation();
2667}
2668
2669ShenandoahRegionIterator::ShenandoahRegionIterator() :
2670 _heap(ShenandoahHeap::heap()),
2671 _index(0) {}
2672
2673ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2674 _heap(heap),
2675 _index(0) {}
2676
2677void ShenandoahRegionIterator::reset() {
2678 _index = 0;
2679}
2680
2681bool ShenandoahRegionIterator::has_next() const {
2682 return _index < _heap->num_regions();
2683}
2684
2685char ShenandoahHeap::gc_state() const {
2686 return _gc_state.raw_value();
2687}
2688
2689void ShenandoahHeap::deduplicate_string(oop str) {
2690 assert(java_lang_String::is_instance(str), "invariant");
2691
2692 if (ShenandoahStringDedup::is_enabled()) {
2693 ShenandoahStringDedup::deduplicate(str);
2694 }
2695}
2696
2697const char* ShenandoahHeap::init_mark_event_message() const {
2698 bool update_refs = has_forwarded_objects();
2699 bool proc_refs = process_references();
2700 bool unload_cls = unload_classes();
2701
2702 if (update_refs && proc_refs && unload_cls) {
2703 return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
2704 } else if (update_refs && proc_refs) {
2705 return "Pause Init Mark (update refs) (process weakrefs)";
2706 } else if (update_refs && unload_cls) {
2707 return "Pause Init Mark (update refs) (unload classes)";
2708 } else if (proc_refs && unload_cls) {
2709 return "Pause Init Mark (process weakrefs) (unload classes)";
2710 } else if (update_refs) {
2711 return "Pause Init Mark (update refs)";
2712 } else if (proc_refs) {
2713 return "Pause Init Mark (process weakrefs)";
2714 } else if (unload_cls) {
2715 return "Pause Init Mark (unload classes)";
2716 } else {
2717 return "Pause Init Mark";
2718 }
2719}
2720
2721const char* ShenandoahHeap::final_mark_event_message() const {
2722 bool update_refs = has_forwarded_objects();
2723 bool proc_refs = process_references();
2724 bool unload_cls = unload_classes();
2725
2726 if (update_refs && proc_refs && unload_cls) {
2727 return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
2728 } else if (update_refs && proc_refs) {
2729 return "Pause Final Mark (update refs) (process weakrefs)";
2730 } else if (update_refs && unload_cls) {
2731 return "Pause Final Mark (update refs) (unload classes)";
2732 } else if (proc_refs && unload_cls) {
2733 return "Pause Final Mark (process weakrefs) (unload classes)";
2734 } else if (update_refs) {
2735 return "Pause Final Mark (update refs)";
2736 } else if (proc_refs) {
2737 return "Pause Final Mark (process weakrefs)";
2738 } else if (unload_cls) {
2739 return "Pause Final Mark (unload classes)";
2740 } else {
2741 return "Pause Final Mark";
2742 }
2743}
2744
2745const char* ShenandoahHeap::conc_mark_event_message() const {
2746 bool update_refs = has_forwarded_objects();
2747 bool proc_refs = process_references();
2748 bool unload_cls = unload_classes();
2749
2750 if (update_refs && proc_refs && unload_cls) {
2751 return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
2752 } else if (update_refs && proc_refs) {
2753 return "Concurrent marking (update refs) (process weakrefs)";
2754 } else if (update_refs && unload_cls) {
2755 return "Concurrent marking (update refs) (unload classes)";
2756 } else if (proc_refs && unload_cls) {
2757 return "Concurrent marking (process weakrefs) (unload classes)";
2758 } else if (update_refs) {
2759 return "Concurrent marking (update refs)";
2760 } else if (proc_refs) {
2761 return "Concurrent marking (process weakrefs)";
2762 } else if (unload_cls) {
2763 return "Concurrent marking (unload classes)";
2764 } else {
2765 return "Concurrent marking";
2766 }
2767}
2768
2769const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2770 switch (point) {
2771 case _degenerated_unset:
2772 return "Pause Degenerated GC (<UNSET>)";
2773 case _degenerated_traversal:
2774 return "Pause Degenerated GC (Traversal)";
2775 case _degenerated_outside_cycle:
2776 return "Pause Degenerated GC (Outside of Cycle)";
2777 case _degenerated_mark:
2778 return "Pause Degenerated GC (Mark)";
2779 case _degenerated_evac:
2780 return "Pause Degenerated GC (Evacuation)";
2781 case _degenerated_updaterefs:
2782 return "Pause Degenerated GC (Update Refs)";
2783 default:
2784 ShouldNotReachHere();
2785 return "ERROR";
2786 }
2787}
2788
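// Per-worker liveness caches: each GC worker accumulates live data (in words) per
// region in its own jushort array; flush_liveness_cache() then folds the counts into
// the regions' live-data counters and zeroes the cache for reuse.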
2789jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2790#ifdef ASSERT
2791 assert(_liveness_cache != NULL, "sanity");
2792 assert(worker_id < _max_workers, "sanity");
2793 for (uint i = 0; i < num_regions(); i++) {
2794 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2795 }
2796#endif
2797 return _liveness_cache[worker_id];
2798}
2799
2800void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2801 assert(worker_id < _max_workers, "sanity");
2802 assert(_liveness_cache != NULL, "sanity");
2803 jushort* ld = _liveness_cache[worker_id];
2804 for (uint i = 0; i < num_regions(); i++) {
2805 ShenandoahHeapRegion* r = get_region(i);
2806 jushort live = ld[i];
2807 if (live > 0) {
2808 r->increase_live_data_gc_words(live);
2809 ld[i] = 0;
2810 }
2811 }
2812}
2813