Wed, 23 Sep 2009 23:56:15 -0700
Merge
1 /*
2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
class CompactibleFreeListSpace;   // forward declaration (befriended by FreeList below)
27 // A class for maintaining a free list of FreeChunk's. The FreeList
// maintains the structure of the list (head, tail, etc.) plus
29 // statistics for allocations from the list. The links between items
30 // are not part of FreeList. The statistics are
31 // used to make decisions about coalescing FreeChunk's when they
32 // are swept during collection.
33 //
34 // See the corresponding .cpp file for a description of the specifics
35 // for that implementation.
class Mutex;   // forward declaration; used only for the debug-build protecting lock
class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class VMStructs;                // VMStructs inspects the fields below; do not rename/reorder
  friend class printTreeCensusClosure;

  FreeChunk* _head;          // Head of the list of free chunks
  FreeChunk* _tail;          // Tail of the list of free chunks
  size_t     _size;          // Size in Heap words of each chunk
  ssize_t    _count;         // Number of entries in list
  size_t     _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // statistics for smart allocation

#ifdef ASSERT
  Mutex* _protecting_lock;   // debug-only: lock expected to guard mutations of this list
#endif

  // Asserts false if the protecting lock (if any) is not held.
  // Expands to an empty body in product builds (PRODUCT_RETURN).
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics();
  void set_count(ssize_t v) { _count = v;}
  void increment_count() { _count++; }
  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructor
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    // A non-null head must belong to this list's size class.
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    // A non-null tail must belong to this list's size class.
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    // A hint of 0 means "no hint"; otherwise it must name a strictly larger size class.
    assert(v == 0 || _size < v, "Bad hint"); _hint = v;
  }

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  // Recompute the desired count from the current count and the
  // inter-sweep timing measurements.
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging. The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )

  // Unlink head of list and return it. Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" or "count", whichever is smaller, chunks from the
  // list, setting "fl", which is required to be empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Return true if "fc" is present in this free list; false otherwise.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Printing support.
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
};