1 | /* |
2 | * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_GC_G1_G1POLICY_HPP |
26 | #define SHARE_GC_G1_G1POLICY_HPP |
27 | |
28 | #include "gc/g1/g1CollectorState.hpp" |
29 | #include "gc/g1/g1GCPhaseTimes.hpp" |
30 | #include "gc/g1/g1HeapRegionAttr.hpp" |
31 | #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp" |
32 | #include "gc/g1/g1MMUTracker.hpp" |
33 | #include "gc/g1/g1RemSetTrackingPolicy.hpp" |
34 | #include "gc/g1/g1Predictions.hpp" |
35 | #include "gc/g1/g1YoungGenSizer.hpp" |
36 | #include "gc/shared/gcCause.hpp" |
37 | #include "utilities/pair.hpp" |
38 | |
39 | // A G1Policy makes policy decisions that determine the |
40 | // characteristics of the collector. Examples include: |
41 | // * choice of collection set. |
42 | // * when to collect. |
43 | |
44 | class HeapRegion; |
45 | class G1CollectionSet; |
46 | class G1CollectionSetCandidates; |
47 | class G1CollectionSetChooser; |
48 | class G1IHOPControl; |
49 | class G1Analytics; |
50 | class G1SurvivorRegions; |
51 | class G1YoungGenSizer; |
52 | class GCPolicyCounters; |
53 | class STWGCTimer; |
54 | |
55 | class G1Policy: public CHeapObj<mtGC> { |
56 | private: |
57 | |
58 | static G1IHOPControl* create_ihop_control(const G1Predictions* predictor); |
59 | // Update the IHOP control with necessary statistics. |
60 | void update_ihop_prediction(double mutator_time_s, |
61 | size_t mutator_alloc_bytes, |
62 | size_t young_gen_size, |
63 | bool this_gc_was_young_only); |
64 | void report_ihop_statistics(); |
65 | |
66 | G1Predictions _predictor; |
67 | G1Analytics* _analytics; |
68 | G1RemSetTrackingPolicy _remset_tracker; |
69 | G1MMUTracker* _mmu_tracker; |
70 | G1IHOPControl* _ihop_control; |
71 | |
72 | GCPolicyCounters* _policy_counters; |
73 | |
74 | double _full_collection_start_sec; |
75 | |
76 | jlong _collection_pause_end_millis; |
77 | |
78 | uint _young_list_target_length; |
79 | uint _young_list_fixed_length; |
80 | |
81 | // The max number of regions we can extend the eden by while the GC |
  // locker is active. This should be >= _young_list_target_length.
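  // (update_max_gc_locker_expansion() typically recomputes this from the
  // target length plus slack based on GCLockerEdenExpansionPercent;
  // illustrative note.)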
83 | uint _young_list_max_length; |
84 | |
85 | // SurvRateGroups below must be initialized after the predictor because they |
86 | // indirectly use it through this object passed to their constructor. |
87 | SurvRateGroup* _short_lived_surv_rate_group; |
88 | SurvRateGroup* _survivor_surv_rate_group; |
89 | |
90 | double _reserve_factor; |
91 | // This will be set when the heap is expanded |
92 | // for the first time during initialization. |
93 | uint _reserve_regions; |
94 | |
95 | G1YoungGenSizer* _young_gen_sizer; |
96 | |
97 | uint _free_regions_at_end_of_collection; |
98 | |
99 | size_t _max_rs_lengths; |
100 | |
101 | size_t _rs_lengths_prediction; |
102 | |
103 | size_t _pending_cards; |
104 | |
  // The number of bytes allocated in the old gen during the last mutator
  // phase and the following young GC phase.
107 | size_t _bytes_allocated_in_old_since_last_gc; |
108 | |
109 | G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed; |
110 | |
111 | bool should_update_surv_rate_group_predictors() { |
112 | return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress(); |
113 | } |
114 | public: |
115 | const G1Predictions& predictor() const { return _predictor; } |
116 | const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); } |
117 | |
118 | G1RemSetTrackingPolicy* remset_tracker() { return &_remset_tracker; } |
119 | |
120 | // Add the given number of bytes to the total number of allocated bytes in the old gen. |
121 | void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; } |
122 | |
123 | void set_region_eden(HeapRegion* hr) { |
124 | hr->set_eden(); |
125 | hr->install_surv_rate_group(_short_lived_surv_rate_group); |
126 | } |
127 | |
128 | void set_region_survivor(HeapRegion* hr) { |
    assert(hr->is_survivor(), "pre-condition");
130 | hr->install_surv_rate_group(_survivor_surv_rate_group); |
131 | } |
132 | |
133 | void record_max_rs_lengths(size_t rs_lengths) { |
134 | _max_rs_lengths = rs_lengths; |
135 | } |
136 | |
137 | double predict_base_elapsed_time_ms(size_t pending_cards) const; |
138 | double predict_base_elapsed_time_ms(size_t pending_cards, |
139 | size_t scanned_cards) const; |
140 | size_t predict_bytes_to_copy(HeapRegion* hr) const; |
141 | double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const; |
142 | |
143 | double predict_survivor_regions_evac_time() const; |
144 | |
145 | void cset_regions_freed() { |
146 | bool update = should_update_surv_rate_group_predictors(); |
147 | |
148 | _short_lived_surv_rate_group->all_surviving_words_recorded(predictor(), update); |
149 | _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update); |
150 | } |
151 | |
152 | G1MMUTracker* mmu_tracker() { |
153 | return _mmu_tracker; |
154 | } |
155 | |
156 | const G1MMUTracker* mmu_tracker() const { |
157 | return _mmu_tracker; |
158 | } |
159 | |
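  // The maximum pause length, in ms, derived from the MMU tracker's GC time
  // goal (typically configured from MaxGCPauseMillis; e.g. a 0.2 s goal
  // yields 200.0 here; illustrative note).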
160 | double max_pause_time_ms() const { |
161 | return _mmu_tracker->max_gc_time() * 1000.0; |
162 | } |
163 | |
164 | double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const; |
165 | |
166 | double predict_yg_surv_rate(int age) const; |
167 | |
168 | double accum_yg_surv_rate_pred(int age) const; |
169 | |
170 | private: |
171 | G1CollectionSet* _collection_set; |
172 | double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const; |
173 | double other_time_ms(double pause_time_ms) const; |
174 | |
175 | double young_other_time_ms() const; |
176 | double non_young_other_time_ms() const; |
177 | double constant_other_time_ms(double pause_time_ms) const; |
178 | |
179 | G1CollectionSetChooser* cset_chooser() const; |
180 | |
181 | // The number of bytes copied during the GC. |
182 | size_t _bytes_copied_during_gc; |
183 | |
184 | // Stash a pointer to the g1 heap. |
185 | G1CollectedHeap* _g1h; |
186 | |
187 | G1GCPhaseTimes* _phase_times; |
188 | |
189 | // This set of variables tracks the collector efficiency, in order to |
190 | // determine whether we should initiate a new marking. |
  double _mark_remark_start_sec;
192 | double _mark_cleanup_start_sec; |
193 | |
194 | // Updates the internal young list maximum and target lengths. Returns the |
195 | // unbounded young list target length. |
196 | uint update_young_list_max_and_target_length(); |
197 | uint update_young_list_max_and_target_length(size_t rs_lengths); |
198 | |
199 | // Update the young list target length either by setting it to the |
200 | // desired fixed value or by calculating it using G1's pause |
201 | // prediction model. If no rs_lengths parameter is passed, predict |
202 | // the RS lengths using the prediction model, otherwise use the |
203 | // given rs_lengths as the prediction. |
204 | // Returns the unbounded young list target length. |
205 | uint update_young_list_target_length(size_t rs_lengths); |
206 | |
207 | // Calculate and return the minimum desired young list target |
208 | // length. This is the minimum desired young list length according |
209 | // to the user's inputs. |
210 | uint calculate_young_list_desired_min_length(uint base_min_length) const; |
211 | |
212 | // Calculate and return the maximum desired young list target |
213 | // length. This is the maximum desired young list length according |
214 | // to the user's inputs. |
215 | uint calculate_young_list_desired_max_length() const; |
216 | |
  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list lengths according to the user's inputs.
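  // For example (illustrative): the implementation searches for the largest
  // young length in [desired_min_length, desired_max_length] whose predicted
  // pause time, checked via predict_will_fit(), still meets the pause goal.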
223 | uint calculate_young_list_target_length(size_t rs_lengths, |
224 | uint base_min_length, |
225 | uint desired_min_length, |
226 | uint desired_max_length) const; |
227 | |
  // Result of the young_list_target_lengths() method, containing both the
  // bounded as well as the unbounded young list target lengths in this order.
230 | typedef Pair<uint, uint, StackObj> YoungTargetLengths; |
231 | YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const; |
232 | |
233 | void update_rs_lengths_prediction(); |
234 | void update_rs_lengths_prediction(size_t prediction); |
235 | |
236 | // Check whether a given young length (young_length) fits into the |
237 | // given target pause time and whether the prediction for the amount |
238 | // of objects to be copied for the given length will fit into the |
239 | // given free space (expressed by base_free_regions). It is used by |
240 | // calculate_young_list_target_length(). |
241 | bool predict_will_fit(uint young_length, double base_time_ms, |
242 | uint base_free_regions, double target_pause_time_ms) const; |
243 | |
244 | public: |
245 | size_t pending_cards() const { return _pending_cards; } |
246 | |
247 | // Calculate the minimum number of old regions we'll add to the CSet |
248 | // during a mixed GC. |
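  // (Typically the remaining candidate regions spread evenly over the
  // remaining mixed GCs of G1MixedGCCountTarget; illustrative note.)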
249 | uint calc_min_old_cset_length() const; |
250 | |
251 | // Calculate the maximum number of old regions we'll add to the CSet |
252 | // during a mixed GC. |
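  // (Typically bounded by G1OldCSetRegionThresholdPercent of the current
  // number of heap regions; illustrative note.)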
253 | uint calc_max_old_cset_length() const; |
254 | |
255 | // Returns the given amount of reclaimable bytes (that represents |
256 | // the amount of reclaimable space still to be collected) as a |
257 | // percentage of the current heap capacity. |
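  // For example (illustrative): 256 MB of reclaimable space in a 1024 MB
  // heap yields 25.0.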
258 | double reclaimable_bytes_percent(size_t reclaimable_bytes) const; |
259 | |
260 | jlong collection_pause_end_millis() { return _collection_pause_end_millis; } |
261 | |
262 | private: |
263 | void clear_collection_set_candidates(); |
264 | // Sets up marking if proper conditions are met. |
265 | void maybe_start_marking(); |
266 | |
267 | // The kind of STW pause. |
268 | enum PauseKind { |
269 | FullGC, |
270 | YoungOnlyGC, |
271 | MixedGC, |
272 | LastYoungGC, |
273 | InitialMarkGC, |
    Cleanup,
    Remark
276 | }; |
277 | |
278 | // Calculate PauseKind from internal state. |
279 | PauseKind young_gc_pause_kind() const; |
280 | // Record the given STW pause with the given start and end times (in s). |
281 | void record_pause(PauseKind kind, double start, double end); |
282 | // Indicate that we aborted marking before doing any mixed GCs. |
283 | void abort_time_to_mixed_tracking(); |
284 | public: |
285 | |
286 | G1Policy(STWGCTimer* gc_timer); |
287 | |
288 | virtual ~G1Policy(); |
289 | |
290 | static G1Policy* create_policy(STWGCTimer* gc_timer_stw); |
291 | |
292 | G1CollectorState* collector_state() const; |
293 | |
294 | G1GCPhaseTimes* phase_times() const { return _phase_times; } |
295 | |
296 | // Check the current value of the young list RSet lengths and |
297 | // compare it against the last prediction. If the current value is |
298 | // higher, recalculate the young list target length prediction. |
299 | void revise_young_list_target_length_if_necessary(size_t rs_lengths); |
300 | |
301 | // This should be called after the heap is resized. |
302 | void record_new_heap_size(uint new_number_of_regions); |
303 | |
304 | virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set); |
305 | |
306 | void note_gc_start(); |
307 | |
308 | bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); |
309 | |
310 | bool about_to_start_mixed_phase() const; |
311 | |
312 | // Record the start and end of an evacuation pause. |
313 | void record_collection_pause_start(double start_time_sec); |
314 | virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc); |
315 | |
316 | // Record the start and end of a full collection. |
317 | void record_full_collection_start(); |
318 | virtual void record_full_collection_end(); |
319 | |
320 | // Must currently be called while the world is stopped. |
321 | void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms); |
322 | |
323 | // Record start and end of remark. |
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();
326 | |
327 | // Record start, end, and completion of cleanup. |
328 | void record_concurrent_mark_cleanup_start(); |
329 | void record_concurrent_mark_cleanup_end(); |
330 | |
331 | void print_phases(); |
332 | |
333 | // Record how much space we copied during a GC. This is typically |
334 | // called when a GC alloc region is being retired. |
335 | void record_bytes_copied_during_gc(size_t bytes) { |
336 | _bytes_copied_during_gc += bytes; |
337 | } |
338 | |
339 | // The amount of space we copied during a GC. |
340 | size_t bytes_copied_during_gc() const { |
341 | return _bytes_copied_during_gc; |
342 | } |
343 | |
344 | bool next_gc_should_be_mixed(const char* true_action_str, |
345 | const char* false_action_str) const; |
346 | |
347 | // Calculate and return the number of initial and optional old gen regions from |
348 | // the given collection set candidates and the remaining time. |
349 | void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates, |
350 | double time_remaining_ms, |
351 | uint& num_initial_regions, |
352 | uint& num_optional_regions); |
353 | |
  // Calculate the number of optional regions from the given collection set
  // candidates, the remaining time and the maximum number of these regions,
  // and return the number of actually selected regions in num_optional_regions.
357 | void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates, |
358 | uint const max_optional_regions, |
359 | double time_remaining_ms, |
360 | uint& num_optional_regions); |
361 | |
362 | private: |
363 | // Set the state to start a concurrent marking cycle and clear |
364 | // _initiate_conc_mark_if_possible because it has now been |
365 | // acted on. |
366 | void initiate_conc_mark(); |
367 | |
368 | public: |
369 | // This sets the initiate_conc_mark_if_possible() flag to start a |
370 | // new cycle, as long as we are not already in one. It's best if it |
371 | // is called during a safepoint when the test whether a cycle is in |
372 | // progress or not is stable. |
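  // Illustrative usage (hypothetical caller):
  //   if (policy->force_initial_mark_if_outside_cycle(GCCause::_g1_humongous_allocation)) {
  //     // an initial-mark pause will be requested at the next opportunity
  //   }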
373 | bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause); |
374 | |
375 | // This is called at the very beginning of an evacuation pause (it |
376 | // has to be the first thing that the pause does). If |
377 | // initiate_conc_mark_if_possible() is true, and the concurrent |
378 | // marking thread has completed its work during the previous cycle, |
  // it will set in_initial_mark_gc() to true so that the pause does
380 | // the initial-mark work and start a marking cycle. |
381 | void decide_on_conc_mark_initiation(); |
382 | |
383 | void finished_recalculating_age_indexes(bool is_survivors) { |
384 | if (is_survivors) { |
385 | _survivor_surv_rate_group->finished_recalculating_age_indexes(); |
386 | } else { |
387 | _short_lived_surv_rate_group->finished_recalculating_age_indexes(); |
388 | } |
389 | } |
390 | |
391 | size_t young_list_target_length() const { return _young_list_target_length; } |
392 | |
393 | bool should_allocate_mutator_region() const; |
394 | |
395 | bool can_expand_young_list() const; |
396 | |
397 | uint young_list_max_length() const { |
398 | return _young_list_max_length; |
399 | } |
400 | |
401 | bool use_adaptive_young_list_length() const; |
402 | |
403 | void transfer_survivors_to_cset(const G1SurvivorRegions* survivors); |
404 | |
405 | private: |
406 | // |
407 | // Survivor regions policy. |
408 | // |
409 | |
  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
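  // (Recomputed by update_survivors_policy(), bounded by
  // MaxTenuringThreshold; illustrative note.)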
412 | uint _tenuring_threshold; |
413 | |
414 | // The limit on the number of regions allocated for survivors. |
415 | uint _max_survivor_regions; |
416 | |
417 | AgeTable _survivors_age_table; |
418 | |
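  // Desired size, in words, of the survivor space; used when computing the
  // tenuring threshold (typically a TargetSurvivorRatio fraction of the
  // space in max_regions; illustrative note).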
419 | size_t desired_survivor_size(uint max_regions) const; |
420 | |
421 | // Fraction used when predicting how many optional regions to include in |
422 | // the CSet. This fraction of the available time is used for optional regions, |
423 | // the rest is used to add old regions to the normal CSet. |
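  // For example (illustrative): with 50 ms of predicted time available,
  // roughly 10 ms (0.2 * 50) is budgeted for optional regions and the
  // remaining 40 ms for old regions added to the normal CSet.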
424 | double optional_prediction_fraction() { return 0.2; } |
425 | |
426 | public: |
427 | // Fraction used when evacuating the optional regions. This fraction of the |
428 | // remaining time is used to choose what regions to include in the evacuation. |
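  // For example (illustrative): with 20 ms left after the mandatory
  // evacuation, up to 15 ms (0.75 * 20) is used to select optional regions.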
429 | double optional_evacuation_fraction() { return 0.75; } |
430 | |
431 | uint tenuring_threshold() const { return _tenuring_threshold; } |
432 | |
433 | uint max_survivor_regions() { |
434 | return _max_survivor_regions; |
435 | } |
436 | |
437 | void note_start_adding_survivor_regions() { |
438 | _survivor_surv_rate_group->start_adding_regions(); |
439 | } |
440 | |
441 | void note_stop_adding_survivor_regions() { |
442 | _survivor_surv_rate_group->stop_adding_regions(); |
443 | } |
444 | |
445 | void record_age_table(AgeTable* age_table) { |
446 | _survivors_age_table.merge(age_table); |
447 | } |
448 | |
449 | void print_age_table(); |
450 | |
451 | void update_max_gc_locker_expansion(); |
452 | |
453 | void update_survivors_policy(); |
454 | |
455 | virtual bool force_upgrade_to_full() { |
456 | return false; |
457 | } |
458 | }; |
459 | |
460 | #endif // SHARE_GC_G1_G1POLICY_HPP |
461 | |