/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcUtil.hpp"

// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have
  // OLD_THRESHOLD/weight samples.
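  //
  // As a rough sketch of the ramp-up (assuming OLD_THRESHOLD is 100, and
  // taking weight() == 25 purely as an example value), the adaptive weight
  // handed to exp_avg() for the first few samples works out to
  //   count():          1    2   3   4   5  ...
  //   adaptive weight:  100  50  33  25  25 ...
  // matching the 1, 1/2, 1/3, ... ramp described above; once
  // OLD_THRESHOLD/count() drops below weight(), MAX2() below simply picks
  // weight() and the ordinary exponential average takes over.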
  unsigned count_weight = 0;

  // Avoid division by zero if the counter wraps (7158457)
  if (!is_old()) {
    count_weight = OLD_THRESHOLD/count();
  }

  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptiveWeightedAverage::print() const {
  print_on(tty);
}

void AdaptiveWeightedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedNoZeroDevAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute new adaptive weighted average based on new sample.
  AdaptiveWeightedAverage::sample(new_sample);

  // Now update the deviation and the padded average.
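  // The deviation is itself an adaptive weighted average, here applied to
  // the absolute error of each sample against the current average, and the
  // padded average is the plain average plus padding() deviations, intended
  // as a conservative (high-side) estimate of the sampled quantity.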
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

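// For reference, update() below maintains the running sums needed for the
// standard (unweighted) least-squares fit over the n points seen so far,
// in the usual normal-equation form:
//
//   slope     = (n * sum(x*y) - sum(x) * sum(y)) / (n * sum(x*x) - sum(x)^2)
//   intercept = (sum(y) - slope * sum(x)) / n
//
// where n is read from _mean_x.count(). The decaying _mean_x/_mean_y
// averages are kept alongside but, as noted in the comment inside, are not
// used when computing the slope and intercept.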
void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here. A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data. If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}

double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}
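
// A minimal usage sketch (values here are hypothetical, not taken from any
// caller): feed paired observations through update() and read the fitted
// prediction back with y().
//
//   LinearLeastSquareFit fit(50);     // 50 is just an example weight
//   fit.update(1.0, 2.0);
//   fit.update(2.0, 4.1);
//   fit.update(3.0, 5.9);
//   double predicted = fit.y(4.0);    // ~7.9 for this data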

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0. That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done. Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope. Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
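//
// For example, with a fitted _slope of -0.5 (the sampled quantity falls as
// x grows), increment_will_decrease() below returns true and
// decrement_will_decrease() returns false; with a _slope of exactly 0,
// both return true, as described above.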
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}