/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shared/space.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

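// Global allocation sequence counter. The Padded wrapper presumably keeps this
// frequently-updated counter on its own cache line to avoid false sharing with
// neighboring globals (see the PaddedAllocSeqNum definition in the header).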
ShenandoahHeapRegion::PaddedAllocSeqNum ShenandoahHeapRegion::_alloc_seq_num;

ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _reserved(MemRegion(start, size_words)),
  _region_number(index),
  _new_top(NULL),
  _critical_pins(0),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _seqnum_first_alloc_mutator(0),
  _seqnum_first_alloc_gc(0),
  _seqnum_last_alloc_mutator(0),
  _seqnum_last_alloc_gc(0),
  _live_data(0) {

  ContiguousSpace::initialize(_reserved, true, committed);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.as_string());
}

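// Informal sketch of the region state machine implemented by the make_*()
// methods below (the switch statements are authoritative):
//
//   _empty_uncommitted <-> _empty_committed -> _regular | _humongous_start | _humongous_cont
//   _regular           <-> _pinned;  _regular -> _cset | _trash
//   _cset              <-> _pinned_cset (only after cancelled evacuation); _cset -> _trash
//   _humongous_start   <-> _pinned_humongous_start
//   _humongous_*, _cset, _regular -> _trash -> _empty_committed (via make_empty())
//
// Full and degenerated GC additionally use the *_bypass() transitions, which
// relax these rules. All transitions happen under the heap lock; illegal ones
// call report_illegal_transition() and abort the VM.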
void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      set_state(_regular);
      // fallthrough
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress() || _heap->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      set_state(_pinned);
      // fallthrough
    case _pinned_cset:
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      set_state(_pinned_humongous_start);
      // fallthrough
    case _pinned_humongous_start:
      _critical_pins++;
      return;
    case _cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_cset;
      _critical_pins++;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        set_state(_regular);
      }
      return;
    case _regular:
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      return;
    case _pinned_cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        set_state(_cset);
      }
      return;
    case _pinned_humongous_start:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        set_state(_humongous_start);
      }
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      set_state(_cset);
      // fallthrough
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so we tell the marking context about it to bypass bitmap resets.
  _heap->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

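// Live data is written with release semantics here and read with acquire
// semantics in get_live_data_words() below, so a count published by one
// thread is fully visible to concurrent readers.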
void ShenandoahHeapRegion::clear_live_data() {
  OrderAccess::release_store_fence<size_t>(&_live_data, 0);
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _shared_allocs = 0;
  _seqnum_first_alloc_mutator = 0;
  _seqnum_last_alloc_mutator = 0;
  _seqnum_first_alloc_gc = 0;
  _seqnum_last_alloc_gc = 0;
}

void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() {
  if (used() > 0) {
    _tlab_allocs = 0;
    _gclab_allocs = 0;
    _shared_allocs = used() >> LogHeapWordSize;
    uint64_t next = _alloc_seq_num.value++;
    _seqnum_first_alloc_mutator = next;
    _seqnum_last_alloc_mutator = next;
    _seqnum_first_alloc_gc = 0;
    _seqnum_last_alloc_gc = 0;
  } else {
    reset_alloc_metadata();
  }
}

size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

size_t ShenandoahHeapRegion::get_live_data_words() const {
  return OrderAccess::load_acquire(&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes(), "Live Data must be a subset of used(), live: " SIZE_FORMAT " used: " SIZE_FORMAT,
         get_live_data_bytes(), used());

  size_t result = used() - get_live_data_bytes();
  return result;
}

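// Prints one table-style row per region. Columns, in order: region number,
// state tag (EU, EC, R, H, HP, HC, CS, T, P, CSP), bottom/top/end addresses
// (BTE), top-at-mark-start (TAMS), used (U), TLAB/GCLAB/shared allocations
// (T/G/S), live data (L), critical pin count (CP), and the first/last
// mutator and GC allocation sequence numbers (SN).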
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_region_number);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(_heap->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);
  st->print("|SN " UINT64_FORMAT_X_W(12) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8),
            seqnum_first_alloc_mutator(), seqnum_last_alloc_mutator(),
            seqnum_first_alloc_gc(), seqnum_last_alloc_gc());
  st->cr();
}

void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
  assert(!is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t reg_num = region_number();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(reg_num > 0, "Sanity");
    reg_num--;
    r = _heap->get_region(reg_num);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();

  reset_alloc_metadata();

  _heap->marking_context()->reset_top_at_mark_start(this);

  make_empty();
}

HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      max_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should not be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahHeapRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      max_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, uncommitting one region may falsely uncommit the adjacent
  // regions too.
  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
  if (UseLargePages && ShenandoahUncommit) {
    region_size = MAX2(region_size, os::large_page_size());
  }
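
  // Illustrative example (not a requirement): with 2M large pages, as is
  // common on Linux/x86, a computed region size of 1M would be rounded up
  // to 2M here, so each region spans a whole number of large pages.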

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
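  // Worked example (illustrative numbers): a 40G heap with a 2048-region
  // target yields region_size = 20M; log2_long(20M) == 24, so the size is
  // rounded down to 2^24 = 16M.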

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that the TLAB sizing policy polls
  // Shenandoah about the next free size, gets the answer for region #N, goes
  // away for a while, then tries to allocate in region #N, and fails because
  // some other thread has claimed a part of region #N in the meantime. The
  // freeset allocation code then has to retire region #N before moving the
  // allocation to region #N+1.
  //
  // The worst case happens when the "answer" is "region size", which means a
  // single race could prematurely retire an entire region. Having smaller
  // TLABs does not fix that completely, but it reduces the probability of
  // overly wasteful region retirement. With the current divisor, we will
  // waste no more than 1/8 of the region size in the worst case. This also
  // has a secondary effect on collection set selection: even under the race,
  // the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get a fragmented
  // region below the garbage threshold that would never be considered for
  // collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);
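  // Illustrative arithmetic, assuming a 16M region, 8-byte heap words, and
  // ShenandoahElasticTLAB disabled: RegionSizeWords = 2M, so MaxTLABSizeWords
  // = 2M / 8 = 256K words = 2M bytes; at most 1/8 of the region (2M of 16M)
  // can be wasted by the retirement race described above.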

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}

void ShenandoahHeapRegion::do_commit() {
  if (!_heap->is_heap_region_special() && !os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  if (!_heap->is_heap_region_special() && !os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) region_number());
    evt.set_start((uintptr_t) bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}