/*
 * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class AllocationStats VALUE_OBJ_CLASS_SPEC {
  // A duration threshold (in ms) used to filter
  // possibly unreliable samples.
  static float _threshold;

  // We measure the demand between the end of the previous sweep and
  // beginning of this sweep:
  //   Count(end_last_sweep) - Count(start_this_sweep)
  //     + splitBirths(between) - splitDeaths(between)
  // The above number divided by the time since the start [END???] of the
  // previous sweep gives us a time rate of demand for blocks
  // of this size. We compute a padded average of this rate as
  // our current estimate for the time rate of demand for blocks
  // of this size. Similarly, we keep a padded average for the time
  // between sweeps. Our current estimate for demand for blocks of
  // this size is then simply computed as the product of these two
  // estimates.
  AdaptivePaddedAverage _demand_rate_estimate;

  ssize_t _desired;        // Estimate computed as described above
  ssize_t _coalDesired;    // desired +/- small-percent for tuning coalescing

  ssize_t _surplus;        // count - (desired +/- small-percent),
                           // used to tune splitting in best fit
  ssize_t _bfrSurp;        // surplus at start of current sweep
  ssize_t _prevSweep;      // count from end of previous sweep
  ssize_t _beforeSweep;    // count from before current sweep
  ssize_t _coalBirths;     // additional chunks from coalescing
  ssize_t _coalDeaths;     // loss from coalescing
  ssize_t _splitBirths;    // additional chunks from splitting
  ssize_t _splitDeaths;    // loss from splitting
  size_t  _returnedBytes;  // number of bytes returned to list.
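
  // Worked example (hypothetical numbers, for illustration only) of the
  // demand estimate described above:
  //   prevSweep (count at end of last sweep)       = 120 blocks
  //   count (at start of this sweep)               =  80 blocks
  //   splitBirths - splitDeaths over the interval  =  30 - 10 = 20 blocks
  //   => demand = 120 - 80 + 20 = 60 blocks
  // With an inter-sweep interval of 2.0 seconds, the sampled rate is
  // 30 blocks/sec; if the padded average of the rate is also near 30 and
  // the estimated time to the next sweep is 3.0 seconds, then
  // _desired ~= 30 * 3.0 = 90 blocks.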
 public:
  void initialize() {
    AdaptivePaddedAverage* dummy =
      new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
                                                         CMS_FLSPadding);
    _desired = 0;
    _coalDesired = 0;
    _surplus = 0;
    _bfrSurp = 0;
    _prevSweep = 0;
    _beforeSweep = 0;
    _coalBirths = 0;
    _coalDeaths = 0;
    _splitBirths = 0;
    _splitDeaths = 0;
    _returnedBytes = 0;
  }

  AllocationStats() {
    initialize();
  }

  // The rate estimate is in blocks per second.
  void compute_desired(size_t count,
                       float inter_sweep_current,
                       float inter_sweep_estimate) {
    // If the latest inter-sweep time is below our granularity
    // of measurement, we may call in here with
    // inter_sweep_current == 0. However, even for suitably small
    // but non-zero inter-sweep durations, we may not trust the accuracy
    // of accumulated data, since it has not been "integrated"
    // (read "low-pass-filtered") long enough, and would be
    // vulnerable to noisy glitches. In such cases, we
    // ignore the current sample and use currently available
    // historical estimates.
    if (inter_sweep_current > _threshold) {
      ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
      float rate = ((float)demand)/inter_sweep_current;
      _demand_rate_estimate.sample(rate);
      _desired = (ssize_t)(_demand_rate_estimate.padded_average()
                           *inter_sweep_estimate);
    }
  }

  ssize_t desired() const { return _desired; }
  void set_desired(ssize_t v) { _desired = v; }

  ssize_t coalDesired() const { return _coalDesired; }
  void set_coalDesired(ssize_t v) { _coalDesired = v; }

  ssize_t surplus() const { return _surplus; }
  void set_surplus(ssize_t v) { _surplus = v; }
  void increment_surplus() { _surplus++; }
  void decrement_surplus() { _surplus--; }

  ssize_t bfrSurp() const { return _bfrSurp; }
  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
  ssize_t prevSweep() const { return _prevSweep; }
  void set_prevSweep(ssize_t v) { _prevSweep = v; }
  ssize_t beforeSweep() const { return _beforeSweep; }
  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }

  ssize_t coalBirths() const { return _coalBirths; }
  void set_coalBirths(ssize_t v) { _coalBirths = v; }
  void increment_coalBirths() { _coalBirths++; }

  ssize_t coalDeaths() const { return _coalDeaths; }
  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
  void increment_coalDeaths() { _coalDeaths++; }

  ssize_t splitBirths() const { return _splitBirths; }
  void set_splitBirths(ssize_t v) { _splitBirths = v; }
  void increment_splitBirths() { _splitBirths++; }

  ssize_t splitDeaths() const { return _splitDeaths; }
  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
  void increment_splitDeaths() { _splitDeaths++; }

  NOT_PRODUCT(
    size_t returnedBytes() const { return _returnedBytes; }
    void set_returnedBytes(size_t v) { _returnedBytes = v; }
  )
};
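
// Illustrative sketch (not part of this header, compiled out): a minimal
// stand-in that mirrors the arithmetic of AllocationStats::compute_desired()
// above. SimpleExpAvg is a hypothetical plain exponential average used in
// place of HotSpot's AdaptivePaddedAverage (which additionally pads the
// average with a deviation term); the names and parameters below are
// assumptions chosen only to make the computation concrete.
#if 0
struct SimpleExpAvg {
  float _avg;     // running exponential average
  float _weight;  // percentage weight given to each new sample (0..100)
  SimpleExpAvg(float weight) : _avg(0.0f), _weight(weight) {}
  void sample(float v) {
    _avg = ((100.0f - _weight) * _avg + _weight * v) / 100.0f;
  }
  float average() const { return _avg; }
};

// Returns the new desired count for one block size, leaving the previous
// value unchanged when the inter-sweep interval is too short to trust.
ssize_t sketch_compute_desired(SimpleExpAvg& rate_estimate,
                               ssize_t desired,              // previous estimate
                               ssize_t prev_sweep,           // count at end of last sweep
                               ssize_t count,                // count at start of this sweep
                               ssize_t split_births,
                               ssize_t split_deaths,
                               float   inter_sweep_current,  // time since last sweep
                               float   inter_sweep_estimate, // predicted time to next sweep
                               float   threshold) {
  if (inter_sweep_current > threshold) {
    ssize_t demand = prev_sweep - count + split_births - split_deaths;
    float rate = ((float)demand) / inter_sweep_current;  // blocks per unit time
    rate_estimate.sample(rate);
    desired = (ssize_t)(rate_estimate.average() * inter_sweep_estimate);
  }
  return desired;
}
#endif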