/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP

#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/ticks.hpp"

class G1OopStarChunkedList;
class G1PLABAllocator;
class G1EvacuationRootClosures;
class HeapRegion;
class outputStream;

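// Per-worker-thread state used during evacuation. Each worker owns its work
// queue of references still to be scanned, a PLAB allocator for copying live
// objects, a local dirty card queue for deferred remembered set updates, and
// per-age statistics about surviving objects.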
class G1ParScanThreadState : public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;
  RefToScanQueue* _refs;
  G1DirtyCardQueue _dcq;
  G1CardTable* _ct;
  G1EvacuationRootClosures* _closures;

  G1PLABAllocator* _plab_allocator;

  AgeTable _age_table;
  G1HeapRegionAttr _dest[G1HeapRegionAttr::Num];
  // Local tenuring threshold.
  uint _tenuring_threshold;
  G1ScanEvacuatedObjClosure _scanner;

  uint _worker_id;

  // Upper and lower threshold to start and end work queue draining.
  uint const _stack_trim_upper_threshold;
  uint const _stack_trim_lower_threshold;

  Tickspan _trim_ticks;
  // Map from young-age-index (0 == not young, 1 == youngest) to the number of
  // surviving words of that age. The base pointer is what we get back from the
  // malloc call.
  size_t* _surviving_young_words_base;
  // This pointer points into the array above; the first few entries are used
  // for padding.
  size_t* _surviving_young_words;

  // Indicates whether in the last generation (old) there is no more space
  // available for allocation.
  bool _old_gen_is_full;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  G1DirtyCardQueue& dirty_card_queue() { return _dcq; }
  G1CardTable* ct()                    { return _ct; }

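  // Maps the region attribute of an object's source region to the attribute of
  // the region it is supposed to be copied to during this pause.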
  G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
    assert(original.is_valid(),
           "Original region attr invalid: %s", original.get_type_str());
    assert(_dest[original.type()].is_valid_gen(),
           "Dest region attr is invalid: %s", _dest[original.type()].get_type_str());
    return _dest[original.type()];
  }

  size_t _num_optional_regions;
  G1OopStarChunkedList* _oops_into_optional_regions;

public:
  G1ParScanThreadState(G1CollectedHeap* g1h,
                       uint worker_id,
                       size_t young_cset_length,
                       size_t optional_cset_length);
  virtual ~G1ParScanThreadState();

  void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void do_oop_ext(T* ref);
  template <class T> void push_on_queue(T* ref);

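  // Records a reference at p to the (cross-region) object o for a later
  // remembered set update: if the destination region's remembered set is being
  // tracked, the card containing p is marked deferred and enqueued on this
  // thread's dirty card queue. Same-region and from-young references are
  // expected to have been filtered out by the caller.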
  template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
    assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
    assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");

#ifdef ASSERT
    HeapRegion* const hr_obj = _g1h->heap_region_containing((HeapWord*)o);
    assert(region_attr.needs_remset_update() == hr_obj->rem_set()->is_tracked(),
           "State flag indicating remset tracking disagrees (%s) with actual remembered set (%s) for region %u",
           BOOL_TO_STR(region_attr.needs_remset_update()),
           BOOL_TO_STR(hr_obj->rem_set()->is_tracked()),
           hr_obj->hrm_index());
#endif
    if (!region_attr.needs_remset_update()) {
      return;
    }
    size_t card_index = ct()->index_for(p);
    // If the card hasn't been added to the buffer, do it.
    if (ct()->mark_card_deferred(card_index)) {
      dirty_card_queue().enqueue(ct()->byte_for_index(card_index));
    }
  }

  G1EvacuationRootClosures* closures() { return _closures; }
  uint worker_id() { return _worker_id; }

  size_t lab_waste_words() const;
  size_t lab_undo_waste_words() const;

  size_t* surviving_young_words() {
    // We add one to hide entry 0, which accumulates surviving words for
    // age -1 regions (i.e. non-young ones).
    return _surviving_young_words + 1;
  }

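  // Flushes this thread's per-pause statistics (e.g. PLAB statistics and the
  // surviving young words counts) into the given aggregate array and global
  // state at the end of the pause.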
  void flush(size_t* surviving_young_words);

private:
  #define G1_PARTIAL_ARRAY_MASK 0x2
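  // References to object arrays that have only been partially scanned are
  // pushed on the work queue with this bit set in the pointer; because object
  // addresses are at least 8-byte aligned, the bit is never set in a real oop.
  // For example (illustrative only):
  //   oop* task = set_partial_array_mask(obj);    // (uintptr_t)obj | 0x2
  //   oop  from = clear_partial_array_mask(task); // recovers obj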

  inline bool has_partial_array_mask(oop* ref) const {
    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  }

  // We never encode partial array oops as narrowOop*, so return false immediately.
  // This allows the compiler to create optimized code when popping references from
  // the work queue.
  inline bool has_partial_array_mask(narrowOop* ref) const {
    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
    return false;
  }

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oops, which allows the
  // specialization of has_partial_array_mask() for narrowOops above.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }

  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> inline void do_oop_evac(T* p);

  inline void deal_with_reference(oop* ref_to_scan);
  inline void deal_with_reference(narrowOop* ref_to_scan);

  inline void dispatch_reference(StarTask ref);

  // Tries to allocate word_sz words in the PLAB of the next "generation" after
  // trying to allocate into dest. region_attr is the original (source) cset
  // state of the object being allocated for. previous_plab_refill_failed
  // indicates whether a previous PLAB refill for that state already failed.
  // Returns a non-NULL pointer if successful, and updates dest if required.
  // Also determines whether we should continue to try to allocate into the various
  // generations or just end trying to allocate.
  HeapWord* allocate_in_next_plab(G1HeapRegionAttr const region_attr,
                                  G1HeapRegionAttr* dest,
                                  size_t word_sz,
                                  bool previous_plab_refill_failed);

  inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age);

  void report_promotion_event(G1HeapRegionAttr const dest_attr,
                              oop const old, size_t word_sz, uint age,
                              HeapWord * const obj_ptr) const;

  inline bool needs_partial_trimming() const;
  inline bool is_partially_trimmed() const;

  inline void trim_queue_to_threshold(uint threshold);
public:
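  // Copies the given live object into a survivor or old region and returns its
  // new location; if no space can be allocated, evacuation failure handling is
  // invoked (see handle_evacuation_failure_par() below).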
  oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markOop const old_mark);

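  // trim_queue() drains the work queue completely; trim_queue_partially() only
  // drains it down to the lower trim threshold once it has grown past the upper
  // one (see the _stack_trim_* thresholds above).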
  void trim_queue();
  void trim_queue_partially();

  Tickspan trim_ticks() const;
  void reset_trim_ticks();

  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markOop m);

  template <typename T>
  inline void remember_root_into_optional_region(T* p);
  template <typename T>
  inline void remember_reference_into_optional_region(T* p);

  inline G1OopStarChunkedList* oops_into_optional_region(const HeapRegion* hr);
};

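// Collection of the per-worker G1ParScanThreadState instances for one pause.
// States are created on demand via state_for_worker(), and their statistics
// (e.g. the surviving young words) are aggregated when flushed at the end of
// the pause.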
class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1ParScanThreadState** _states;
  size_t* _surviving_young_words_total;
  size_t _young_cset_length;
  size_t _optional_cset_length;
  uint _n_workers;
  bool _flushed;

 public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                          uint n_workers,
                          size_t young_cset_length,
                          size_t optional_cset_length);
  ~G1ParScanThreadStateSet();

  void flush();
  void record_unused_optional_region(HeapRegion* hr);

  G1ParScanThreadState* state_for_worker(uint worker_id);

  const size_t* surviving_young_words() const;

 private:
  G1ParScanThreadState* new_par_scan_state(uint worker_id, size_t young_cset_length);
};

#endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP