1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp Sat Dec 01 00:00:00 2007 +0000 1.3 @@ -0,0 +1,301 @@ 1.4 +/* 1.5 + * Copyright 2001-2006 Sun Microsystems, Inc. All Rights Reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or 1.24 + * have any questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +class CompactibleFreeListSpace; 1.29 + 1.30 +// A class for maintaining a free list of FreeChunk's. The FreeList 1.31 +// maintains a the structure of the list (head, tail, etc.) plus 1.32 +// statistics for allocations from the list. The links between items 1.33 +// are not part of FreeList. The statistics are 1.34 +// used to make decisions about coalescing FreeChunk's when they 1.35 +// are swept during collection. 1.36 +// 1.37 +// See the corresponding .cpp file for a description of the specifics 1.38 +// for that implementation. 
// Forward declaration: in debug (ASSERT) builds a FreeList may record the
// Mutex that protects it, so mutating accessors can assert the lock is held.
class Mutex;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  FreeChunk* _head;          // List of free chunks
  FreeChunk* _tail;          // Tail of list of free chunks
  size_t     _size;          // Size in Heap words of each chunk
  ssize_t    _count;         // Number of entries in list
  size_t     _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats;  // statistics for smart allocation

#ifdef ASSERT
  Mutex*     _protecting_lock;        // lock (if any) guarding this list; debug-only
#endif

  // Asserts false if the protecting lock (if any) is not held.
  // Declared PRODUCT_RETURN: compiles to an empty body in product builds.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  // Cheap inline guard: only does work when a protecting lock was registered.
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics();
  void set_count(ssize_t v) { _count = v;}
  void increment_count() { _count++; }
  void decrement_count() {
    _count--;
    // Catches underflow: _count is signed (ssize_t) precisely so this check works.
    assert(_count >= 0, "Count should not be negative"); }

 public:
  // Constructor
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    // Every chunk on this list must be exactly _size heap words.
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    // A hint of 0 means "no hint"; otherwise it must name a strictly larger size.
    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
  }

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  // Recompute the desired count from the current count and the
  // inter-sweep timing estimates (delegated to AllocationStats).
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )

  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" or "count", whichever is smaller, chunks from the
  // list, setting "fl", which is required to be empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list.
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk is in the list.
  // Returns true if "fc" is found on this list; false otherwise.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
};