/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
  _g1h(g1h),
  _analytics(analytics),
  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {

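  // Sanity check: the number of over-threshold pauses needed to trigger an
  // expansion must fit within the window of recorded pause times.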
  assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics,
         "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
  clear_ratio_check_data();
}

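// Reset the bookkeeping for the current heuristics window: the number of
// recent pauses whose GC overhead exceeded the threshold, the sum of those
// overheads, and the count of pauses seen since tracking started.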
void G1HeapSizingPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
  _pauses_since_start = 0;
}

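// Compute how many bytes to expand the heap by after a GC, based on how the
// recent GC overhead compares to the target derived from GCTimeRatio.
// Returns 0 if no expansion is warranted.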
size_t G1HeapSizingPolicy::expansion_amount() {
  double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
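  // A GCTimeRatio of N means GC should take at most 1/(1+N) of total time,
  // so the acceptable GC overhead in percent is 100/(1+N).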
  const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  double threshold = gc_overhead_percent;
  size_t expand_bytes = 0;

  // If the heap is at less than half its maximum size, scale the threshold
  // down, but not below 1. Thus the smaller the heap is, the more likely it
  // is to expand, though the scaling code will likely keep the increase small.
  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
    threshold = MAX2(threshold, 1.0);
  }

  // If the last GC time ratio is over the threshold, increment the count of
  // times it has been exceeded, and add this ratio to the sum of exceeded
  // ratios.
  if (last_gc_overhead > threshold) {
    _ratio_over_threshold_count++;
    _ratio_over_threshold_sum += last_gc_overhead;
  }

  // Check if we've had enough GC time ratio checks that were over the
  // threshold to trigger an expansion. We'll also expand if we've
  // reached the end of the history buffer and the average of all entries
  // is still over the threshold. This indicates a smaller number of GCs were
  // long enough to make the average exceed the threshold.
  bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
      (filled_history_buffer && (recent_gc_overhead > threshold))) {
    size_t min_expand_bytes = HeapRegion::GrainBytes;
    size_t reserved_bytes = _g1h->max_capacity();
    size_t committed_bytes = _g1h->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    double scale_factor = 1.0;

    // If the current size is less than 1/4 of the Initial heap size, expand
    // by half of the delta between the current and Initial sizes. I.e., grow
    // back quickly.
    //
    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
    // the available expansion space, whichever is smaller, as the base
    // expansion size. Then possibly scale this size according to how much the
    // threshold has (on average) been exceeded by. If the delta is small
    // (less than the StartScaleDownAt value), scale the size down linearly, but
    // not by less than MinScaleDownFactor. If the delta is large (greater than
    // the StartScaleUpAt value), scale up, adding no more than MaxScaleUpFactor
    // times the base size. The scaling will be linear in the range from
    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
    // ScaleUpRange sets the rate of scaling up.
    if (committed_bytes < InitialHeapSize / 4) {
      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
    } else {
      double const MinScaleDownFactor = 0.2;
      double const MaxScaleUpFactor = 2;
      double const StartScaleDownAt = gc_overhead_percent;
      double const StartScaleUpAt = gc_overhead_percent * 1.5;
      double const ScaleUpRange = gc_overhead_percent * 2.0;
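      // For example, with a gc_overhead_percent of 10 these are 10, 15 and 20
      // respectively: a ratio delta of 5 scales the base amount by 0.5, while
      // a delta of 25 scales it by 1 + (25 - 15) / 20 = 1.5.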

      double ratio_delta;
      if (filled_history_buffer) {
        ratio_delta = recent_gc_overhead - threshold;
      } else {
        ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
      }

      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
      if (ratio_delta < StartScaleDownAt) {
        scale_factor = ratio_delta / StartScaleDownAt;
        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
      } else if (ratio_delta > StartScaleUpAt) {
        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
      }
    }

    log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
                              "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
                              recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

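    // Apply the scale factor to the base expansion amount.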
    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

    // Ensure the expansion size is at least the minimum growth amount
    // and at most the remaining uncommitted byte size.
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    clear_ratio_check_data();
  } else {
    // An expansion was not triggered. If we've started counting, increment
    // the number of checks we've made in the current window. If we've
    // reached the end of the window without resizing, clear the counters to
    // start again the next time we see a ratio above the threshold.
    if (_ratio_over_threshold_count > 0) {
      _pauses_since_start++;
      if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
        clear_ratio_check_data();
      }
    }
  }

  return expand_bytes;
}