src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp

author:      ysr
date:        Wed, 23 Dec 2009 09:23:54 -0800
changeset:   1580:e018e6884bd8
parent:      631:d1605aabd0a1
child:       1907:c18cbe5936b8
permissions: -rw-r--r--

6631166: CMS: better heuristics when combatting fragmentation
Summary: Autonomic per-worker free block cache sizing, tunable coalition policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking.
Reviewed-by: jmasa

/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class CompactibleFreeListSpace;

// A class for maintaining a free list of FreeChunk's. The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list. The links between items
// are not part of the FreeList. The statistics are
// used to make decisions about coalescing FreeChunk's when they
// are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// of this implementation.
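//
// Illustrative sketch (not part of the original header; 'addr', 'size'
// and 'chunk' are hypothetical caller-side names). Each FreeList holds
// chunks of a single size, so typical use looks like:
//
//   FreeList fl(addr, size);             // one-chunk list of 'size'-word blocks
//   fl.returnChunkAtTail(chunk);         // give back another chunk of that size
//   FreeChunk* fc = fl.getChunkAtHead(); // NULL when the list is empty
//
// A CompactibleFreeListSpace keeps an array of such lists, one per
// (small) chunk size, and consults the per-list statistics at sweep time.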

class Mutex;
class TreeList;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
  friend class PrintTreeCensusClosure;

 protected:
  TreeList* _parent;
  TreeList* _left;
  TreeList* _right;

 private:
  FreeChunk* _head;         // Head of list of free chunks
  FreeChunk* _tail;         // Tail of list of free chunks
  size_t     _size;         // Size in Heap words of each chunk
  ssize_t    _count;        // Number of entries in list
  size_t     _hint;         // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

#ifdef ASSERT
  Mutex* _protecting_lock;
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics(bool split_birth = false);
  void set_count(ssize_t v) { _count = v;}
  void increment_count() {
    _count++;
  }

  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructors
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list, and, if "v" is non-NULL, set its
  // prev field to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }
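
  // Illustrative note (not part of the original header): link_head()
  // differs from set_head() in that it also severs the new head's back
  // link. For a hypothetical chunk 'fc' being unlinked from the front:
  //
  //   link_head(fc->next()); // successor becomes head, its prev cleared
  //
  // whereas set_head(fc->next()) alone would leave a stale prev pointer
  // on the new head.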

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list, and, if "v" is non-NULL, set its
  // next field to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
  }

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging. The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )
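
  // Illustrative cross-check (not part of the original header; 'lists',
  // 'num_lists' and 'bytes_swept' are hypothetical caller-side names):
  //
  //   size_t total = 0;
  //   for (size_t i = 0; i < num_lists; i++) {
  //     total += lists[i].returnedBytes();
  //   }
  //   assert(total == bytes_swept, "sweep accounting mismatch");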

  // Unlink head of list and return it. Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" or "count", whichever is smaller, chunks from the
  // list, setting "fl", which is required to be empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk "fc" is in the list; return true if it is
  // found, false otherwise.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Stats verification
  void verify_stats() const PRODUCT_RETURN;

  // Printing support
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};
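
// Usage sketch (illustrative, not part of the original header; assumes
// the caller holds the protecting lock, and that 'donor' and 'batch'
// are hypothetical lists of the same chunk size):
//
//   FreeList donor;                           // populated elsewhere
//   FreeList batch;                           // must start out empty
//   donor.getFirstNChunksFromList(16, &batch);
//   FreeChunk* fc = batch.getChunkAtHead();   // take one chunk, or NULL
//   if (fc != NULL) {
//     batch.returnChunkAtTail(fc, true);      // give it back, recording stats
//   }
//   donor.prepend(&batch);                    // splice the rest back on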
