Fri, 29 Feb 2008 14:42:56 -0800
6668743: CMS: Consolidate block statistics reporting code
Summary: Reduce the amount of related code replication and improve pretty printing.
Reviewed-by: jmasa
/*
 * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class CompactibleFreeListSpace;

// A class for maintaining a free list of FreeChunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of the FreeList itself.  The statistics are used to
// make decisions about coalescing FreeChunk's when they are swept
// during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
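//
// A minimal usage sketch (illustrative only; "chunk_size" and "fc" are
// hypothetical, and the real call sites live in CompactibleFreeListSpace):
//
//   FreeList fl;                          // empty list
//   fl.set_size(chunk_size);              // all chunks on one list share a size
//   fl.returnChunkAtHead(fc);             // add a free chunk of that size
//   FreeChunk* c = fl.getChunkAtHead();   // unlink the head; NULL when empty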

class Mutex;

class FreeList VALUE_OBJ_CLASS_SPEC {
  friend class CompactibleFreeListSpace;
  friend class printTreeCensusClosure;
  FreeChunk*    _head;          // Head of list of free chunks
  FreeChunk*    _tail;          // Tail of list of free chunks
  size_t        _size;          // Size in Heap words of each chunk
  ssize_t       _count;         // Number of entries in list
  size_t        _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats;  // statistics for smart allocation

#ifdef ASSERT
  Mutex*        _protecting_lock;
#endif

  // Asserts false if the protecting lock (if any) is not held.
  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
  void assert_proper_lock_protection() const {
#ifdef ASSERT
    if (_protecting_lock != NULL)
      assert_proper_lock_protection_work();
#endif
  }

  // Initialize the allocation statistics.
 protected:
  void init_statistics();
  void set_count(ssize_t v) { _count = v; }
  void increment_count()    { _count++; }
  void decrement_count() {
    _count--;
    assert(_count >= 0, "Count should not be negative");
  }

 public:
  // Constructors:
  // Construct a list without any entries.
  FreeList();
  // Construct a list with "fc" as the first (and lone) entry in the list.
  FreeList(FreeChunk* fc);
  // Construct a list which will have a FreeChunk at address "addr" and
  // of size "size" as the first (and lone) entry in the list.
  FreeList(HeapWord* addr, size_t size);

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  // Declare the current free list to be protected by the given lock.
#ifdef ASSERT
  void set_protecting_lock(Mutex* protecting_lock) {
    _protecting_lock = protecting_lock;
  }
#endif

  // Accessors.
  FreeChunk* head() const {
    assert_proper_lock_protection();
    return _head;
  }
  void set_head(FreeChunk* v) {
    assert_proper_lock_protection();
    _head = v;
    assert(!_head || _head->size() == _size, "bad chunk size");
  }
  // Set the head of the list and set the prev field of non-null
  // values to NULL.
  void link_head(FreeChunk* v) {
    assert_proper_lock_protection();
    set_head(v);
    // If this method is not used (just set the head instead),
    // this check can be avoided.
    if (v != NULL) {
      v->linkPrev(NULL);
    }
  }

  FreeChunk* tail() const {
    assert_proper_lock_protection();
    return _tail;
  }
  void set_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    _tail = v;
    assert(!_tail || _tail->size() == _size, "bad chunk size");
  }
  // Set the tail of the list and set the next field of non-null
  // values to NULL.
  void link_tail(FreeChunk* v) {
    assert_proper_lock_protection();
    set_tail(v);
    if (v != NULL) {
      v->clearNext();
    }
  }

  // No locking checks in read-accessors: lock-free reads (only) are benign.
  // Readers are expected to have the lock if they are doing work that
  // requires atomicity guarantees in sections of code.
  size_t size() const {
    return _size;
  }
  void set_size(size_t v) {
    assert_proper_lock_protection();
    _size = v;
  }
  ssize_t count() const {
    return _count;
  }
  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    assert(v == 0 || _size < v, "Bad hint");
    _hint = v;
  }

  // Accessors for statistics
  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(_count,
                                      inter_sweep_current,
                                      inter_sweep_estimate);
  }
  ssize_t coalDesired() const {
    return _allocation_stats.coalDesired();
  }
  void set_coalDesired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDesired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfrSurp() const {
    return _allocation_stats.bfrSurp();
  }
  void set_bfrSurp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfrSurp(v);
  }
  ssize_t prevSweep() const {
    return _allocation_stats.prevSweep();
  }
  void set_prevSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prevSweep(v);
  }
  ssize_t beforeSweep() const {
    return _allocation_stats.beforeSweep();
  }
  void set_beforeSweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_beforeSweep(v);
  }

  ssize_t coalBirths() const {
    return _allocation_stats.coalBirths();
  }
  void set_coalBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalBirths(v);
  }
  void increment_coalBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalBirths();
  }

  ssize_t coalDeaths() const {
    return _allocation_stats.coalDeaths();
  }
  void set_coalDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coalDeaths(v);
  }
  void increment_coalDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coalDeaths();
  }

  ssize_t splitBirths() const {
    return _allocation_stats.splitBirths();
  }
  void set_splitBirths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitBirths(v);
  }
  void increment_splitBirths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitBirths();
  }

  ssize_t splitDeaths() const {
    return _allocation_stats.splitDeaths();
  }
  void set_splitDeaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_splitDeaths(v);
  }
  void increment_splitDeaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_splitDeaths();
  }

  NOT_PRODUCT(
    // For debugging.  The "_returnedBytes" in all the lists are summed
    // and compared with the total number of bytes swept during a
    // collection.
    size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
    void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
    void increment_returnedBytes_by(size_t v) {
      _allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
    }
  )
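
  // A hypothetical sketch of the debug-only cross-check described above
  // (the names bytes_swept, num_lists and list[] are illustrative only;
  // the real verification is performed by CompactibleFreeListSpace during
  // the sweep):
  //
  //   size_t sum = 0;
  //   for (size_t i = 0; i < num_lists; i++) {
  //     sum += list[i].returnedBytes();
  //   }
  //   assert(sum == bytes_swept, "returnedBytes disagrees with bytes swept");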

  // Unlink head of list and return it.  Returns NULL if
  // the list is empty.
  FreeChunk* getChunkAtHead();

  // Remove the first "n" or "count", whichever is smaller, chunks from the
  // list, setting "fl", which is required to be empty, to point to them.
  void getFirstNChunksFromList(size_t n, FreeList* fl);

  // Unlink this chunk from its free list.
  void removeChunk(FreeChunk* fc);

  // Add this chunk to this free list.
  void returnChunkAtHead(FreeChunk* fc);
  void returnChunkAtTail(FreeChunk* fc);

  // Similar to returnChunk* but also records some diagnostic
  // information.
  void returnChunkAtHead(FreeChunk* fc, bool record_return);
  void returnChunkAtTail(FreeChunk* fc, bool record_return);

  // Prepend "fl" (whose size is required to be the same as that of "this")
  // to the front of "this" list.
  void prepend(FreeList* fl);

  // Verify that the chunk is in this list; returns true if and only
  // if "fc" is found.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;

  // Printing support.
  static void print_labels_on(outputStream* st, const char* c);
  void print_on(outputStream* st, const char* c = NULL) const;
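
  // An illustrative use of the printing support (a sketch only; the census
  // printing in compactibleFreeListSpace.cpp is the authoritative caller):
  //
  //   FreeList::print_labels_on(gclog_or_tty, "size");  // column headers
  //   fl.print_on(gclog_or_tty);                        // one row per list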
};