src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp

Thu, 20 Nov 2008 16:56:09 -0800

author
ysr
date
Thu, 20 Nov 2008 16:56:09 -0800
changeset 888
c96030fff130
parent 631
d1605aabd0a1
child 1580
e018e6884bd8
permissions
-rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa

duke@435 1 /*
xdono@631 2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 class CompactibleFreeListSpace;
duke@435 26
duke@435 27 // A class for maintaining a free list of FreeChunk's. The FreeList
duke@435 28 // maintains the structure of the list (head, tail, etc.) plus
duke@435 29 // statistics for allocations from the list. The links between items
duke@435 30 // are not part of FreeList. The statistics are
duke@435 31 // used to make decisions about coalescing FreeChunk's when they
duke@435 32 // are swept during collection.
duke@435 33 //
duke@435 34 // See the corresponding .cpp file for a description of the specifics
duke@435 35 // for that implementation.
duke@435 36
duke@435 37 class Mutex;
duke@435 38
duke@435 39 class FreeList VALUE_OBJ_CLASS_SPEC {
duke@435 40 friend class CompactibleFreeListSpace;
dcubed@587 41 friend class VMStructs;
ysr@447 42 friend class printTreeCensusClosure;
duke@435 43 FreeChunk* _head; // List of free chunks
duke@435 44 FreeChunk* _tail; // Tail of list of free chunks
duke@435 45 size_t _size; // Size in Heap words of each chunks
duke@435 46 ssize_t _count; // Number of entries in list
duke@435 47 size_t _hint; // next larger size list with a positive surplus
duke@435 48
duke@435 49 AllocationStats _allocation_stats; // statistics for smart allocation
duke@435 50
duke@435 51 #ifdef ASSERT
duke@435 52 Mutex* _protecting_lock;
duke@435 53 #endif
duke@435 54
duke@435 55 // Asserts false if the protecting lock (if any) is not held.
duke@435 56 void assert_proper_lock_protection_work() const PRODUCT_RETURN;
duke@435 57 void assert_proper_lock_protection() const {
duke@435 58 #ifdef ASSERT
duke@435 59 if (_protecting_lock != NULL)
duke@435 60 assert_proper_lock_protection_work();
duke@435 61 #endif
duke@435 62 }
duke@435 63
duke@435 64 // Initialize the allocation statistics.
duke@435 65 protected:
duke@435 66 void init_statistics();
duke@435 67 void set_count(ssize_t v) { _count = v;}
ysr@447 68 void increment_count() { _count++; }
duke@435 69 void decrement_count() {
duke@435 70 _count--;
ysr@447 71 assert(_count >= 0, "Count should not be negative");
ysr@447 72 }
duke@435 73
duke@435 74 public:
duke@435 75 // Constructor
duke@435 76 // Construct a list without any entries.
duke@435 77 FreeList();
duke@435 78 // Construct a list with "fc" as the first (and lone) entry in the list.
duke@435 79 FreeList(FreeChunk* fc);
duke@435 80 // Construct a list which will have a FreeChunk at address "addr" and
duke@435 81 // of size "size" as the first (and lone) entry in the list.
duke@435 82 FreeList(HeapWord* addr, size_t size);
duke@435 83
duke@435 84 // Reset the head, tail, hint, and count of a free list.
duke@435 85 void reset(size_t hint);
duke@435 86
duke@435 87 // Declare the current free list to be protected by the given lock.
duke@435 88 #ifdef ASSERT
duke@435 89 void set_protecting_lock(Mutex* protecting_lock) {
duke@435 90 _protecting_lock = protecting_lock;
duke@435 91 }
duke@435 92 #endif
duke@435 93
duke@435 94 // Accessors.
duke@435 95 FreeChunk* head() const {
duke@435 96 assert_proper_lock_protection();
duke@435 97 return _head;
duke@435 98 }
duke@435 99 void set_head(FreeChunk* v) {
duke@435 100 assert_proper_lock_protection();
duke@435 101 _head = v;
duke@435 102 assert(!_head || _head->size() == _size, "bad chunk size");
duke@435 103 }
duke@435 104 // Set the head of the list and set the prev field of non-null
duke@435 105 // values to NULL.
duke@435 106 void link_head(FreeChunk* v) {
duke@435 107 assert_proper_lock_protection();
duke@435 108 set_head(v);
duke@435 109 // If this method is not used (just set the head instead),
duke@435 110 // this check can be avoided.
duke@435 111 if (v != NULL) {
duke@435 112 v->linkPrev(NULL);
duke@435 113 }
duke@435 114 }
duke@435 115
duke@435 116 FreeChunk* tail() const {
duke@435 117 assert_proper_lock_protection();
duke@435 118 return _tail;
duke@435 119 }
duke@435 120 void set_tail(FreeChunk* v) {
duke@435 121 assert_proper_lock_protection();
duke@435 122 _tail = v;
duke@435 123 assert(!_tail || _tail->size() == _size, "bad chunk size");
duke@435 124 }
duke@435 125 // Set the tail of the list and set the next field of non-null
duke@435 126 // values to NULL.
duke@435 127 void link_tail(FreeChunk* v) {
duke@435 128 assert_proper_lock_protection();
duke@435 129 set_tail(v);
duke@435 130 if (v != NULL) {
duke@435 131 v->clearNext();
duke@435 132 }
duke@435 133 }
duke@435 134
duke@435 135 // No locking checks in read-accessors: lock-free reads (only) are benign.
duke@435 136 // Readers are expected to have the lock if they are doing work that
duke@435 137 // requires atomicity guarantees in sections of code.
duke@435 138 size_t size() const {
duke@435 139 return _size;
duke@435 140 }
duke@435 141 void set_size(size_t v) {
duke@435 142 assert_proper_lock_protection();
duke@435 143 _size = v;
duke@435 144 }
duke@435 145 ssize_t count() const {
duke@435 146 return _count;
duke@435 147 }
duke@435 148 size_t hint() const {
duke@435 149 return _hint;
duke@435 150 }
duke@435 151 void set_hint(size_t v) {
duke@435 152 assert_proper_lock_protection();
duke@435 153 assert(v == 0 || _size < v, "Bad hint"); _hint = v;
duke@435 154 }
duke@435 155
duke@435 156 // Accessors for statistics
duke@435 157 AllocationStats* allocation_stats() {
duke@435 158 assert_proper_lock_protection();
duke@435 159 return &_allocation_stats;
duke@435 160 }
duke@435 161
duke@435 162 ssize_t desired() const {
duke@435 163 return _allocation_stats.desired();
duke@435 164 }
ysr@447 165 void set_desired(ssize_t v) {
ysr@447 166 assert_proper_lock_protection();
ysr@447 167 _allocation_stats.set_desired(v);
ysr@447 168 }
duke@435 169 void compute_desired(float inter_sweep_current,
duke@435 170 float inter_sweep_estimate) {
duke@435 171 assert_proper_lock_protection();
duke@435 172 _allocation_stats.compute_desired(_count,
duke@435 173 inter_sweep_current,
duke@435 174 inter_sweep_estimate);
duke@435 175 }
duke@435 176 ssize_t coalDesired() const {
duke@435 177 return _allocation_stats.coalDesired();
duke@435 178 }
duke@435 179 void set_coalDesired(ssize_t v) {
duke@435 180 assert_proper_lock_protection();
duke@435 181 _allocation_stats.set_coalDesired(v);
duke@435 182 }
duke@435 183
duke@435 184 ssize_t surplus() const {
duke@435 185 return _allocation_stats.surplus();
duke@435 186 }
duke@435 187 void set_surplus(ssize_t v) {
duke@435 188 assert_proper_lock_protection();
duke@435 189 _allocation_stats.set_surplus(v);
duke@435 190 }
duke@435 191 void increment_surplus() {
duke@435 192 assert_proper_lock_protection();
duke@435 193 _allocation_stats.increment_surplus();
duke@435 194 }
duke@435 195 void decrement_surplus() {
duke@435 196 assert_proper_lock_protection();
duke@435 197 _allocation_stats.decrement_surplus();
duke@435 198 }
duke@435 199
duke@435 200 ssize_t bfrSurp() const {
duke@435 201 return _allocation_stats.bfrSurp();
duke@435 202 }
duke@435 203 void set_bfrSurp(ssize_t v) {
duke@435 204 assert_proper_lock_protection();
duke@435 205 _allocation_stats.set_bfrSurp(v);
duke@435 206 }
duke@435 207 ssize_t prevSweep() const {
duke@435 208 return _allocation_stats.prevSweep();
duke@435 209 }
duke@435 210 void set_prevSweep(ssize_t v) {
duke@435 211 assert_proper_lock_protection();
duke@435 212 _allocation_stats.set_prevSweep(v);
duke@435 213 }
duke@435 214 ssize_t beforeSweep() const {
duke@435 215 return _allocation_stats.beforeSweep();
duke@435 216 }
duke@435 217 void set_beforeSweep(ssize_t v) {
duke@435 218 assert_proper_lock_protection();
duke@435 219 _allocation_stats.set_beforeSweep(v);
duke@435 220 }
duke@435 221
duke@435 222 ssize_t coalBirths() const {
duke@435 223 return _allocation_stats.coalBirths();
duke@435 224 }
duke@435 225 void set_coalBirths(ssize_t v) {
duke@435 226 assert_proper_lock_protection();
duke@435 227 _allocation_stats.set_coalBirths(v);
duke@435 228 }
duke@435 229 void increment_coalBirths() {
duke@435 230 assert_proper_lock_protection();
duke@435 231 _allocation_stats.increment_coalBirths();
duke@435 232 }
duke@435 233
duke@435 234 ssize_t coalDeaths() const {
duke@435 235 return _allocation_stats.coalDeaths();
duke@435 236 }
duke@435 237 void set_coalDeaths(ssize_t v) {
duke@435 238 assert_proper_lock_protection();
duke@435 239 _allocation_stats.set_coalDeaths(v);
duke@435 240 }
duke@435 241 void increment_coalDeaths() {
duke@435 242 assert_proper_lock_protection();
duke@435 243 _allocation_stats.increment_coalDeaths();
duke@435 244 }
duke@435 245
duke@435 246 ssize_t splitBirths() const {
duke@435 247 return _allocation_stats.splitBirths();
duke@435 248 }
duke@435 249 void set_splitBirths(ssize_t v) {
duke@435 250 assert_proper_lock_protection();
duke@435 251 _allocation_stats.set_splitBirths(v);
duke@435 252 }
duke@435 253 void increment_splitBirths() {
duke@435 254 assert_proper_lock_protection();
duke@435 255 _allocation_stats.increment_splitBirths();
duke@435 256 }
duke@435 257
duke@435 258 ssize_t splitDeaths() const {
duke@435 259 return _allocation_stats.splitDeaths();
duke@435 260 }
duke@435 261 void set_splitDeaths(ssize_t v) {
duke@435 262 assert_proper_lock_protection();
duke@435 263 _allocation_stats.set_splitDeaths(v);
duke@435 264 }
duke@435 265 void increment_splitDeaths() {
duke@435 266 assert_proper_lock_protection();
duke@435 267 _allocation_stats.increment_splitDeaths();
duke@435 268 }
duke@435 269
duke@435 270 NOT_PRODUCT(
duke@435 271 // For debugging. The "_returnedBytes" in all the lists are summed
duke@435 272 // and compared with the total number of bytes swept during a
duke@435 273 // collection.
duke@435 274 size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
duke@435 275 void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
duke@435 276 void increment_returnedBytes_by(size_t v) {
duke@435 277 _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
duke@435 278 }
duke@435 279 )
duke@435 280
duke@435 281 // Unlink head of list and return it. Returns NULL if
duke@435 282 // the list is empty.
duke@435 283 FreeChunk* getChunkAtHead();
duke@435 284
duke@435 285 // Remove the first "n" or "count", whichever is smaller, chunks from the
duke@435 286 // list, setting "fl", which is required to be empty, to point to them.
duke@435 287 void getFirstNChunksFromList(size_t n, FreeList* fl);
duke@435 288
duke@435 289 // Unlink this chunk from it's free list
duke@435 290 void removeChunk(FreeChunk* fc);
duke@435 291
duke@435 292 // Add this chunk to this free list.
duke@435 293 void returnChunkAtHead(FreeChunk* fc);
duke@435 294 void returnChunkAtTail(FreeChunk* fc);
duke@435 295
duke@435 296 // Similar to returnChunk* but also records some diagnostic
duke@435 297 // information.
duke@435 298 void returnChunkAtHead(FreeChunk* fc, bool record_return);
duke@435 299 void returnChunkAtTail(FreeChunk* fc, bool record_return);
duke@435 300
duke@435 301 // Prepend "fl" (whose size is required to be the same as that of "this")
duke@435 302 // to the front of "this" list.
duke@435 303 void prepend(FreeList* fl);
duke@435 304
duke@435 305 // Verify that the chunk is in the list.
duke@435 306 // found. Return NULL if "fc" is not found.
duke@435 307 bool verifyChunkInFreeLists(FreeChunk* fc) const;
ysr@447 308
ysr@447 309 // Printing support
ysr@447 310 static void print_labels_on(outputStream* st, const char* c);
ysr@447 311 void print_on(outputStream* st, const char* c = NULL) const;
duke@435 312 };

mercurial