/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/orderAccess.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

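// Returns the PLAB allocation statistics for the given destination:
// survivor statistics for Young, old generation statistics for Old.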
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

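// Returns the desired PLAB size, in HeapWords, for allocations into the given
// destination, derived from the recorded allocation statistics and the number
// of active workers, and capped below the humongous object threshold.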
size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using a similar path as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm->next_region_in_humongous(hr);
}

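// The region index of an address is its byte offset from the start of the
// reserved heap, shifted right by the region size log (LogOfHRGrainBytes).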
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}

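// Returns the region containing the given address, which must be non-NULL and
// within the reserved heap. The _or_null variant below additionally returns
// NULL if that region is not currently mapped.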
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm->addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// Dirties the cards that cover the block so that the post-write barrier
// never queues anything when updating objects on this block. It is
// assumed (and in fact we assert) that the block belongs to a young
// region.
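// Illustrative sketch of a call site (names here are hypothetical): after
// carving a block of word_size words out of a young region, an allocation
// path calls
//   dirty_young_block(result, word_size);
// so that subsequent writes into the block never generate barrier work.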
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
}

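// The collection set membership queries below are all answered from the
// region attribute table (_region_attr).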
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) {
  return _region_attr.at((HeapWord*)addr);
}

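// The register_*_with_region_attr() helpers below record, per region, its
// attribute type and whether its remembered set is currently being tracked.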
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->prepare_for_scan_rem_set(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

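// Returns whether forced evacuation failures are enabled for the given kind of
// GC, as selected by the G1EvacuationFailureALotDuring* develop flags.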
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

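// Returns true every G1EvacuationFailureALotCount-th call once failure
// injection is armed for the current GC. The counter is intentionally updated
// without atomics, so the exact trigger point may vary between threads.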
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

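// An object is dead for full GC purposes if it is not marked on the next
// mark bitmap and does not reside in an archive region.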
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the region attribute table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP