src/share/vm/gc_implementation/shared/gcUtil.cpp

author      johnc
date        Thu, 20 Sep 2012 09:52:56 -0700
changeset   4067:b2ef234911c9
parent      3763:78a1b285cda8
child       4153:b9a9ed0f8eeb
permissions -rw-r--r--

7190666: G1: assert(_unused == 0) failed: Inconsistency in PLAB stats
Summary: Reset the fields in ParGCAllocBuffer that are used to accumulate values for the ResizePLAB sensors in PLABStats to zero after flushing those values to the PLABStats fields. Flush the PLABStats values only when retiring the final allocation buffers prior to disposing of a G1ParScanThreadState object, rather than when retiring every allocation buffer.
Reviewed-by: jwilhelm, jmasa, ysr
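In outline, the fix follows an accumulate/flush/reset pattern: each buffer accumulates its own counters, those counters are flushed into the shared statistics only once (when the final buffers are retired), and the local fields are zeroed afterwards so the same values cannot be counted or asserted on twice. A minimal sketch of that pattern, using hypothetical names rather than the actual ParGCAllocBuffer/PLABStats interfaces:

    // Hypothetical stand-ins for PLABStats and ParGCAllocBuffer, for illustration only.
    struct SharedPlabStats {
      size_t allocated;
      size_t wasted;
      size_t unused;
    };

    struct LocalPlab {
      size_t _allocated, _wasted, _unused;

      // Add this buffer's counters to the shared stats, then reset them to
      // zero so a later flush or consistency check never sees stale values.
      void flush_stats(SharedPlabStats* stats) {
        stats->allocated += _allocated;
        stats->wasted    += _wasted;
        stats->unused    += _unused;
        _allocated = _wasted = _unused = 0;
      }
    };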

/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcUtil.hpp"

// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have
  // OLD_THRESHOLD/weight samples.
  unsigned count_weight = 0;

  // Avoid division by zero if the counter wraps (7158457)
  if (!is_old()) {
    count_weight = OLD_THRESHOLD/count();
  }

  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}
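// Illustrative numbers, assuming OLD_THRESHOLD is 100 and that exp_avg()
// blends the old average and the new sample as ((100 - w) * avg + w * sample) / 100
// with the weight w expressed as a percentage (see gcUtil.hpp): with weight() == 25,
//
//   count() == 1  ->  count_weight == 100, adaptive_weight == 100 (new sample taken as-is)
//   count() == 2  ->  count_weight ==  50, adaptive_weight ==  50
//   count() == 4  ->  count_weight ==  25, adaptive_weight ==  25
//   count() >  4  ->  count_weight  <  weight(), so weight() is used from then on
//
// Early samples therefore dominate until enough history has accumulated;
// after that the configured weight() governs the exponential decay.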

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptiveWeightedAverage::print() const {
  print_on(tty);
}

void AdaptiveWeightedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedNoZeroDevAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute new adaptive weighted average based on new sample.
  AdaptiveWeightedAverage::sample(new_sample);

  // Now update the deviation and the padded average.
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}
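// Worked example with hypothetical numbers: if the running average settles at
// 10.0 ms, the adaptive deviation at 2.0 ms, and padding() is 3, then
//
//   padded_average() == 10.0 + 3 * 2.0 == 16.0 ms
//
// so callers that size against padded_average() use a value that typical
// samples are unlikely to exceed, rather than the mean alone.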

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here. A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data. If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}
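// In conventional notation, with n == _mean_x.count(), update() maintains the
// standard least-squares estimates from running sums:
//
//   slope     = (n * sum(x*y) - sum(x) * sum(y)) / (n * sum(x^2) - sum(x)^2)
//   intercept = (sum(y) - slope * sum(x)) / n
//
// For example (hypothetical data), feeding the points (1,2), (2,4), (3,6)
// through update() gives a slope of 2 and an intercept of 0, so y(4) below
// returns 8.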

double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0. That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done. Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope. Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
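
// Example of the intended use (a sketch, not taken from a specific policy in
// this repository): if a fit of pause time against generation size is kept
// up to date with update(size, pause_time), a size controller can ask
//
//   if (fit.decrement_will_decrease()) { /* shrinking should lower pause time */ }
//   if (fit.increment_will_decrease()) { /* growing should lower pause time */ }
//
// and while _slope is still 0 both report true, i.e. no trend has been
// established yet and either adjustment is treated as potentially helpful.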
