Mon, 09 Jun 2008 07:18:59 -0700
6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
Summary: The fix takes care of three issues that can create a hole less than a minimal object in the lgrp chunk
Reviewed-by: ysr, apetrusenko
/*
 * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_gcUtil.cpp.incl"

// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have 100/weight
  // samples.
  unsigned count_weight = 100/count();
  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}
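
// Worked example of the smoothing above, assuming a configured weight()
// of 25 (weights here are percentages, and assuming exp_avg() blends in
// percent terms, i.e. (weight * new_sample + (100 - weight) * average) / 100):
//   count() == 1: count_weight = 100/1 = 100 -> adaptive_weight = 100
//   count() == 2: count_weight = 100/2 =  50 -> adaptive_weight =  50
//   count() == 3: count_weight = 100/3 =  33 -> adaptive_weight =  33
//   count() == 4: count_weight = 100/4 =  25 -> adaptive_weight =  25
// An adaptive_weight of 100 makes the first average exactly the first
// sample, the second average a 50/50 blend, and so on; from the
// 100/weight()-th sample onward the configured weight() takes over.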

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();
  assert(count() != 0,
         "Wraparound -- history would be incorrectly discarded");

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  // Now compute the deviation and the new padded sample
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}
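
// Example of the padding, with hypothetical values: if average() is
// 10.0, the new deviation is 2.0, and padding() is 3, the padded
// average becomes 10.0 + 3 * 2.0 = 16.0. A client that needs a
// conservative estimate of a sampled quantity reads padded_average()
// rather than average().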

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}
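
// Note the difference from AdaptivePaddedAverage::sample(): a run of
// zero samples decays average() toward zero but leaves deviation()
// untouched, so the padded average keeps its safety margin instead of
// collapsing to zero along with the samples.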

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  // All running sums start at zero; _sum_x_squared in particular is
  // read by update() before any explicit assignment.
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0),
  _mean_x(weight), _mean_y(weight) {}

void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here. A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data. If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}
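
// update() maintains the standard least-squares estimates over the
// n = _mean_x.count() points seen so far:
//   slope     = (n * sum(x*y) - sum(x) * sum(y)) /
//               (n * sum(x*x) - sum(x) * sum(x))
//   intercept = (sum(y) - slope * sum(x)) / n
// Worked example (hypothetical data): for the points (1,2), (2,4),
// (3,6), update() yields _slope == 2.0 and _intercept == 0.0, so
// y(4) below returns 8.0.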

double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}
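
// With fewer than two samples there is no line to evaluate, so y()
// falls back to the decaying mean of the observed y values. For
// example, after a single update(5.0, 7.0) call, y(x) returns 7.0 for
// any x, since the first sample fully determines _mean_y under the
// compute_adaptive_average() smoothing above.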

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0. That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done. Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope. Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
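
// Hypothetical usage sketch (names here are illustrative, not from this
// file): a resize policy tracking (generation size, pause time) pairs
// could consult the fit as
//
//   LinearLeastSquareFit pause_fit(25);      // weight is a percentage
//   pause_fit.update(gen_size, pause_time);
//   if (pause_fit.decrement_will_decrease()) {
//     // shrinking the generation is expected to shrink pauses
//   }
//
// remembering that both predicates answer true for a slope of 0, so a
// fit with no computed slope permits a change in either direction.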