src/share/vm/gc_implementation/shared/gcUtil.cpp

author:      ysr
date:        Wed, 23 Dec 2009 09:23:54 -0800
changeset:   1580:e018e6884bd8
parent:      435:a61af66fc99e
child:       1907:c18cbe5936b8
permissions: -rw-r--r--

6631166: CMS: better heuristics when combatting fragmentation
Summary: Autonomic per-worker free block cache sizing, tunable coalition policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking.
Reviewed-by: jmasa

/*
 * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_gcUtil.cpp.incl"

// Catch-all file for utility classes

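// The helper exp_avg() implements the standard exponentially weighted
// moving average,
//   new_avg = (w/100) * new_sample + (1 - w/100) * old_avg
// where w is a percentage weight in [0, 100]: a larger w reacts faster
// to new samples, a smaller w smooths harder. The warm-up logic below
// substitutes 100/count() until enough samples have arrived; e.g. with
// weight() == 25, the first sample is taken at weight 100 (seeding the
// average), the second at 50, the third at 33, and from the fourth
// sample onward the configured weight of 25 applies.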
float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have 100/weight
  // samples.
  unsigned count_weight = 100/count();
  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();
  assert(count() != 0,
         "Wraparound -- history would be incorrectly discarded");

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptiveWeightedAverage::print() const {
  print_on(tty);
}

void AdaptiveWeightedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedNoZeroDevAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

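// Alongside the plain average, the padded average maintains a decaying
// average of the absolute deviation |sample - average| and publishes
// average() + padding() * deviation(). Consumers can read the padded
// value as a conservative ("mean plus padding() deviations") estimate
// of the sampled quantity.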
void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute new adaptive weighted average based on new sample.
  AdaptiveWeightedAverage::sample(new_sample);

  // Now update the deviation and the padded average.
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}

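// Identical to AdaptivePaddedAverage::sample() except that zero-valued
// samples still update the average but leave the deviation untouched,
// so intervals that contribute nothing do not distort the spread
// estimate used for padding.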
void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only update the deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

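// Accumulates the classic simple-linear-regression sums and, once at
// least two points are present, solves the normal equations:
//   slope     = (n * sum(xy) - sum(x) * sum(y)) / (n * sum(x^2) - sum(x)^2)
//   intercept = (sum(y) - slope * sum(x)) / n
// with n == _mean_x.count().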
void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here. A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data. If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}

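// Predicts y for a given x from the fitted line once two or more points
// have been seen; before that, falls back to the decaying mean of y.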
double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0. That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done. Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope. Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
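
For illustration, here is a minimal sketch of how a client might drive one of
these averages. This is hypothetical usage, not part of gcUtil.cpp; it assumes
the AdaptiveWeightedAverage constructor taking a percentage weight declared in
the companion gcUtil.hpp:

  // Hypothetical usage sketch; assumes AdaptiveWeightedAverage(unsigned)
  // from gcUtil.hpp, with weight as a percentage in [0, 100].
  AdaptiveWeightedAverage pause_avg(25);  // give each new sample 25% weight
  pause_avg.sample(10.0f);                // avg == 10.0 (warm-up weight 100)
  pause_avg.sample(14.0f);                // avg == 12.0 (warm-up weight 50)
  pause_avg.sample(12.0f);                // avg == 12.0 (weight MAX2(25, 33))
  float smoothed = pause_avg.average();   // smoothed, decayed estimate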
