/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class CompactibleFreeListSpace;

// A class for maintaining a free list of FreeChunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of FreeList.  The statistics are used to make
// decisions about coalescing FreeChunk's when they are swept during
// collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
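
// As an illustrative sketch only (the variable names "word_size", "fc" and
// "res" below are made up; the real call sites are in the implementation
// referred to above), a list of a given chunk size is populated and drained
// roughly like this:
//
//   FreeList fl;                           // empty list for one size class
//   fl.set_size(word_size);                // every chunk in "fl" has this size
//   fl.returnChunkAtHead(fc);              // add a chunk recovered by the sweeper
//   FreeChunk* res = fl.getChunkAtHead();  // remove a chunk to satisfy an allocation
//
// The per-list allocation statistics accessed further below are what feed
// the coalescing decisions described in the class comment.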

class Mutex;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;
  friend class printTreeCensusClosure;
  FreeChunk*    _head;    // List of free chunks
  FreeChunk*    _tail;    // Tail of list of free chunks
  size_t        _size;    // Size in heap words of each chunk
  ssize_t       _count;   // Number of entries in list
  size_t        _hint;    // next larger size list with a positive surplus

  AllocationStats _allocation_stats;  // statistics for smart allocation

#ifdef ASSERT
  Mutex*        _protecting_lock;
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics();
  void set_count(ssize_t v) { _count = v; }
  void increment_count()    { _count++; }
  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructors
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }
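
  // Note: the asserts in set_head() and set_tail() above capture the basic
  // invariant of this class: every chunk linked into a FreeList has the same
  // size as the list itself.  Callers normally add and remove chunks through
  // returnChunkAtHead()/returnChunkAtTail() and getChunkAtHead() (declared
  // further below) rather than by manipulating the head and tail directly.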

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    assert(v == 0 || _size < v, "Bad hint");
    _hint = v;
  }

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }
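
  // The split births/deaths below mirror the coalescing births/deaths above:
  // as the names suggest, they count chunks of this size gained or lost
  // through splitting rather than through coalescing, and they are part of
  // the same per-list allocation statistics used for the sweep-time
  // decisions described in the class comment.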

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )

  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" or "count", whichever is smaller, chunks from the
  // list, setting "fl", which is required to be empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list.
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk is in the list.
  // Returns false if "fc" is not found.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Printing support
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};
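
// Illustrative only: a sanity walk over a FreeList "fl", assuming FreeChunk
// exposes a next() accessor for the forward link implied by clearNext()
// above (verifyChunkInFreeLists() presumably performs a walk of this kind):
//
//   for (FreeChunk* fc = fl->head(); fc != NULL; fc = fc->next()) {
//     assert(fc->size() == fl->size(), "chunk on wrong size list");
//   }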