src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp

author      stefank
date        Tue, 23 Nov 2010 13:22:55 -0800
changeset   2314:f95d63e2154a
parent      1907:c18cbe5936b8
permissions -rw-r--r--

6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP

#include "gc_implementation/shared/allocationStats.hpp"

class CompactibleFreeListSpace;

// A class for maintaining a free list of FreeChunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of the FreeList.  The statistics are used to make
// decisions about coalescing FreeChunk's when they are swept during
// collection.
//
// See the corresponding .cpp file for a description of the specifics
// of this implementation.
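//
// A minimal usage sketch (hypothetical; "chunk_size" and "fc" are assumed
// to be supplied by the caller, and "fc" must already be formatted as a
// FreeChunk of the matching size):
//
//   FreeList fl;
//   fl.set_size(chunk_size);             // all chunks in a list share one size
//   fl.returnChunkAtHead(fc);            // push a swept chunk onto the list
//   FreeChunk* c = fl.getChunkAtHead();  // pop it again; NULL when empty
//   ssize_t    n = fl.count();           // number of chunks currently linked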

class Mutex;
class TreeList;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
  friend class PrintTreeCensusClosure;

 protected:
  TreeList* _parent;
  TreeList* _left;
  TreeList* _right;

 private:
  FreeChunk* _head;          // Head of list of free chunks
  FreeChunk* _tail;          // Tail of list of free chunks
  size_t     _size;          // Size in Heap words of each chunk
  ssize_t    _count;         // Number of entries in list
  size_t     _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

#ifdef ASSERT
  Mutex* _protecting_lock;
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics(bool split_birth = false);
  void set_count(ssize_t v) { _count = v; }
  void increment_count() {
    _count++;
  }

  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructors
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif
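
  // Illustrative debug-build usage (a sketch; "SomeFreelistLock" stands in
  // for whatever Mutex actually guards this list and is not defined here):
  //
  //   FreeList fl;
  //   DEBUG_ONLY(fl.set_protecting_lock(&SomeFreelistLock);)
  //   // mutating accessors below now assert that the lock is held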

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    assert(v == 0 || _size < v, "Bad hint");
    _hint = v;
  }
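
  // Descriptive note (behavior implemented elsewhere): when this list is
  // empty, allocation code may consult hint() to locate the next larger
  // size list likely to have a surplus chunk that can be split.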

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )
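
  // A sketch of that debugging check (hypothetical; the real comparison
  // lives in the sweeping code, and "lists", "num_lists" and "bytes_swept"
  // are assumed names, not declared here):
  //
  //   size_t total = 0;
  //   for (size_t i = 0; i < num_lists; i++) {
  //     total += lists[i].returnedBytes();
  //   }
  //   assert(total == bytes_swept, "sweep accounting mismatch");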

  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" chunks from the list (or all "count" chunks,
  // if "count" is smaller), setting "fl", which is required to be
  // empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list.
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk is in the list.
  // Returns true if "fc" is found.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Stats verification
  void verify_stats() const PRODUCT_RETURN;

  // Printing support
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
