Sun, 13 Apr 2008 17:43:42 -0400
6420645: Create a VM that uses compressed oops for up to 32GB heap sizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 2001-2005 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | class AllocationStats VALUE_OBJ_CLASS_SPEC { |
duke@435 | 26 | // A duration threshold (in ms) used to filter |
duke@435 | 27 | // possibly unreliable samples. |
duke@435 | 28 | static float _threshold; |
duke@435 | 29 | |
duke@435 | 30 | // We measure the demand between the end of the previous sweep and |
duke@435 | 31 | // beginning of this sweep: |
duke@435 | 32 | // Count(end_last_sweep) - Count(start_this_sweep) |
duke@435 | 33 | // + splitBirths(between) - splitDeaths(between) |
duke@435 | 34 | // The above number divided by the time since the start [END???] of the |
duke@435 | 35 | // previous sweep gives us a time rate of demand for blocks |
duke@435 | 36 | // of this size. We compute a padded average of this rate as |
duke@435 | 37 | // our current estimate for the time rate of demand for blocks |
duke@435 | 38 | // of this size. Similarly, we keep a padded average for the time |
duke@435 | 39 | // between sweeps. Our current estimate for demand for blocks of |
duke@435 | 40 | // this size is then simply computed as the product of these two |
duke@435 | 41 | // estimates. |
duke@435 | 42 | AdaptivePaddedAverage _demand_rate_estimate; |
duke@435 | 43 | |
duke@435 | 44 | ssize_t _desired; // Estimate computed as described above |
duke@435 | 45 | ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing |
duke@435 | 46 | |
duke@435 | 47 | ssize_t _surplus; // count - (desired +/- small-percent), |
duke@435 | 48 | // used to tune splitting in best fit |
duke@435 | 49 | ssize_t _bfrSurp; // surplus at start of current sweep |
duke@435 | 50 | ssize_t _prevSweep; // count from end of previous sweep |
duke@435 | 51 | ssize_t _beforeSweep; // count from before current sweep |
duke@435 | 52 | ssize_t _coalBirths; // additional chunks from coalescing |
duke@435 | 53 | ssize_t _coalDeaths; // loss from coalescing |
duke@435 | 54 | ssize_t _splitBirths; // additional chunks from splitting |
duke@435 | 55 | ssize_t _splitDeaths; // loss from splitting |
duke@435 | 56 | size_t _returnedBytes; // number of bytes returned to list. |
duke@435 | 57 | public: |
duke@435 | 58 | void initialize() { |
duke@435 | 59 | AdaptivePaddedAverage* dummy = |
duke@435 | 60 | new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight, |
duke@435 | 61 | CMS_FLSPadding); |
duke@435 | 62 | _desired = 0; |
duke@435 | 63 | _coalDesired = 0; |
duke@435 | 64 | _surplus = 0; |
duke@435 | 65 | _bfrSurp = 0; |
duke@435 | 66 | _prevSweep = 0; |
duke@435 | 67 | _beforeSweep = 0; |
duke@435 | 68 | _coalBirths = 0; |
duke@435 | 69 | _coalDeaths = 0; |
duke@435 | 70 | _splitBirths = 0; |
duke@435 | 71 | _splitDeaths = 0; |
duke@435 | 72 | _returnedBytes = 0; |
duke@435 | 73 | } |
duke@435 | 74 | |
duke@435 | 75 | AllocationStats() { |
duke@435 | 76 | initialize(); |
duke@435 | 77 | } |
duke@435 | 78 | // The rate estimate is in blocks per second. |
duke@435 | 79 | void compute_desired(size_t count, |
duke@435 | 80 | float inter_sweep_current, |
duke@435 | 81 | float inter_sweep_estimate) { |
duke@435 | 82 | // If the latest inter-sweep time is below our granularity |
duke@435 | 83 | // of measurement, we may call in here with |
duke@435 | 84 | // inter_sweep_current == 0. However, even for suitably small |
duke@435 | 85 | // but non-zero inter-sweep durations, we may not trust the accuracy |
duke@435 | 86 | // of accumulated data, since it has not been "integrated" |
duke@435 | 87 | // (read "low-pass-filtered") long enough, and would be |
duke@435 | 88 | // vulnerable to noisy glitches. In such cases, we |
duke@435 | 89 | // ignore the current sample and use currently available |
duke@435 | 90 | // historical estimates. |
duke@435 | 91 | if (inter_sweep_current > _threshold) { |
duke@435 | 92 | ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths(); |
duke@435 | 93 | float rate = ((float)demand)/inter_sweep_current; |
duke@435 | 94 | _demand_rate_estimate.sample(rate); |
duke@435 | 95 | _desired = (ssize_t)(_demand_rate_estimate.padded_average() |
duke@435 | 96 | *inter_sweep_estimate); |
duke@435 | 97 | } |
duke@435 | 98 | } |
duke@435 | 99 | |
duke@435 | 100 | ssize_t desired() const { return _desired; } |
ysr@447 | 101 | void set_desired(ssize_t v) { _desired = v; } |
ysr@447 | 102 | |
duke@435 | 103 | ssize_t coalDesired() const { return _coalDesired; } |
duke@435 | 104 | void set_coalDesired(ssize_t v) { _coalDesired = v; } |
duke@435 | 105 | |
duke@435 | 106 | ssize_t surplus() const { return _surplus; } |
duke@435 | 107 | void set_surplus(ssize_t v) { _surplus = v; } |
duke@435 | 108 | void increment_surplus() { _surplus++; } |
duke@435 | 109 | void decrement_surplus() { _surplus--; } |
duke@435 | 110 | |
duke@435 | 111 | ssize_t bfrSurp() const { return _bfrSurp; } |
duke@435 | 112 | void set_bfrSurp(ssize_t v) { _bfrSurp = v; } |
duke@435 | 113 | ssize_t prevSweep() const { return _prevSweep; } |
duke@435 | 114 | void set_prevSweep(ssize_t v) { _prevSweep = v; } |
duke@435 | 115 | ssize_t beforeSweep() const { return _beforeSweep; } |
duke@435 | 116 | void set_beforeSweep(ssize_t v) { _beforeSweep = v; } |
duke@435 | 117 | |
duke@435 | 118 | ssize_t coalBirths() const { return _coalBirths; } |
duke@435 | 119 | void set_coalBirths(ssize_t v) { _coalBirths = v; } |
duke@435 | 120 | void increment_coalBirths() { _coalBirths++; } |
duke@435 | 121 | |
duke@435 | 122 | ssize_t coalDeaths() const { return _coalDeaths; } |
duke@435 | 123 | void set_coalDeaths(ssize_t v) { _coalDeaths = v; } |
duke@435 | 124 | void increment_coalDeaths() { _coalDeaths++; } |
duke@435 | 125 | |
duke@435 | 126 | ssize_t splitBirths() const { return _splitBirths; } |
duke@435 | 127 | void set_splitBirths(ssize_t v) { _splitBirths = v; } |
duke@435 | 128 | void increment_splitBirths() { _splitBirths++; } |
duke@435 | 129 | |
duke@435 | 130 | ssize_t splitDeaths() const { return _splitDeaths; } |
duke@435 | 131 | void set_splitDeaths(ssize_t v) { _splitDeaths = v; } |
duke@435 | 132 | void increment_splitDeaths() { _splitDeaths++; } |
duke@435 | 133 | |
duke@435 | 134 | NOT_PRODUCT( |
duke@435 | 135 | size_t returnedBytes() const { return _returnedBytes; } |
duke@435 | 136 | void set_returnedBytes(size_t v) { _returnedBytes = v; } |
duke@435 | 137 | ) |
duke@435 | 138 | }; |