src/share/vm/gc_implementation/shared/gcUtil.cpp

author       coleenp
date         Sun, 13 Apr 2008 17:43:42 -0400
changeset    548:ba764ed4b6f2
parent       435:a61af66fc99e
child        1580:e018e6884bd8
permissions  -rw-r--r--

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold

/*
 * Copyright 2002-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_gcUtil.cpp.incl"
// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc. until we have 100/weight
  // samples.
  unsigned count_weight = 100/count();
  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}
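
To make the ramp-up concrete, here is a minimal, self-contained sketch of the scheme described in the comment above. The exp_avg() below is a plain reimplementation of the percentage-weighted blend the class is assumed to get from gcUtil.hpp; the 25% steady-state weight and the sample values are illustrative, not HotSpot defaults.

#include <cstdio>

// Percentage-weighted blend, assumed to match the exp_avg() helper in
// gcUtil.hpp: weight 100 trusts only the new sample, weight 0 only history.
static float exp_avg(float avg, float sample, unsigned weight) {
  return ((100.0f - weight) * avg + weight * sample) / 100.0f;
}

int main() {
  const unsigned weight = 25;   // illustrative steady-state weight (a percent)
  float avg = 0.0f;
  float samples[] = { 10.0f, 12.0f, 11.0f, 13.0f, 12.0f };
  for (unsigned count = 1; count <= 5; count++) {
    // First sample gets weight 100, second 50, third 33, ... until
    // 100/count falls below the configured steady-state weight.
    unsigned count_weight = 100 / count;
    unsigned adaptive_weight = (count_weight > weight) ? count_weight : weight;
    avg = exp_avg(avg, samples[count - 1], adaptive_weight);
    printf("sample %u: weight=%u%% avg=%.2f\n", count, adaptive_weight, avg);
  }
  return 0;
}

With weight = 25, the adaptive weight runs 100, 50, 33, 25, 25, ..., so the first few samples are trusted heavily while there is no meaningful history to decay.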
void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();
  assert(count() != 0,
         "Wraparound -- history would be incorrectly discarded");

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}
void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  // Now compute the deviation and the new padded sample
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}
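
For intuition, a standalone sketch of the padding idea (not the HotSpot class itself): keep a decaying average and a decaying deviation of |sample - average|, and report average + padding * deviation so the estimate errs on the high side. The 50% weight and padding factor of 2 below are made-up values.

#include <cmath>
#include <cstdio>

int main() {
  const unsigned weight = 50;   // assumed percent weight for both averages
  const float padding = 2.0f;   // hypothetical padding factor
  float avg = 0.0f, dev = 0.0f;
  float samples[] = { 10.0f, 14.0f, 9.0f, 15.0f };
  for (float s : samples) {
    // Same order as above: update the average first, then the deviation
    // of the new sample from the new average.
    avg = ((100.0f - weight) * avg + weight * s) / 100.0f;
    dev = ((100.0f - weight) * dev + weight * fabsf(s - avg)) / 100.0f;
    printf("sample=%4.1f avg=%5.2f dev=%4.2f padded=%5.2f\n",
           s, avg, dev, avg + padding * dev);
  }
  return 0;
}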
void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only update the deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}
LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0),
  _mean_x(weight), _mean_y(weight) {}
void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here.  A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data.  If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}
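
The running sums above maintain the closed-form least-squares solution, slope = (n*sum_xy - sum_x*sum_y) / (n*sum_x_squared - sum_x*sum_x) and intercept = (sum_y - slope*sum_x) / n, with _mean_x.count() playing the role of n. A small self-contained check, using a plain counter in place of the decaying averages, that these formulas recover a known line:

#include <cstdio>

int main() {
  // Points on the exact line y = 2x + 1; the fit should recover it.
  double xs[] = { 1.0, 2.0, 3.0, 4.0 };
  double ys[] = { 3.0, 5.0, 7.0, 9.0 };
  double sum_x = 0.0, sum_x_squared = 0.0, sum_y = 0.0, sum_xy = 0.0;
  int n = 0;
  for (int i = 0; i < 4; i++) {
    sum_x += xs[i]; sum_x_squared += xs[i] * xs[i];
    sum_y += ys[i]; sum_xy += xs[i] * ys[i];
    n++;
    if (n > 1) {
      double denominator = n * sum_x_squared - sum_x * sum_x;
      if (denominator != 0.0) {   // same guard as update() above
        double slope = (n * sum_xy - sum_x * sum_y) / denominator;
        double intercept = (sum_y - slope * sum_x) / n;
        printf("n=%d slope=%.3f intercept=%.3f\n", n, slope, intercept);
      }
    }
  }
  return 0;
}

Feeding points from y = 2x + 1 prints slope 2.000 and intercept 1.000 from the second point onward.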
double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}
// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0.  That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done.  Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope.  Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
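
A hypothetical caller, to show how these predicates are meant to be consulted; the function and parameter names below are illustrative, not HotSpot API, and gcUtil.hpp is assumed to be included so LinearLeastSquareFit is visible. Because both predicates return true on a zero slope, the order of the tests decides what happens before a trend exists.

#include <cstddef>

void adjust_size_sketch(LinearLeastSquareFit* fit,
                        size_t* current_size, size_t step) {
  // fit has been fed (size, cost) points via fit->update(size, cost).
  if (fit->decrement_will_decrease()) {
    *current_size -= step;   // slope >= 0: a smaller x predicts a smaller y
  } else if (fit->increment_will_decrease()) {
    *current_size += step;   // slope <= 0: a larger x predicts a smaller y
  }
}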
