src/share/vm/memory/freeList.hpp

Tue, 11 Sep 2012 14:59:23 +0200

author
stefank
date
Tue, 11 Sep 2012 14:59:23 +0200
changeset 4050
ec98e58952b2
parent 3732
f69a5d43dc19
child 4153
b9a9ed0f8eeb
permissions
-rw-r--r--

7197350: NPG: jvmtiHeapReferenceCallback receives incorrect reference_kind for system class roots
Summary: Fix the iteration over the system classes and report the correct reference kind.
Reviewed-by: coleenp, rbackman

duke@435 1 /*
stefank@2314 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
jmasa@3730 25 #ifndef SHARE_VM_MEMORY_FREELIST_HPP
jmasa@3730 26 #define SHARE_VM_MEMORY_FREELIST_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/shared/allocationStats.hpp"
stefank@2314 29
duke@435 30 class CompactibleFreeListSpace;
duke@435 31
jmasa@3730 32 // A class for maintaining a free list of Chunk's. The FreeList
duke@435 33 // maintains the structure of the list (head, tail, etc.) plus
duke@435 34 // statistics for allocations from the list. The links between items
duke@435 35 // are not part of FreeList. The statistics are
jmasa@3730 36 // used to make decisions about coalescing Chunk's when they
duke@435 37 // are swept during collection.
duke@435 38 //
duke@435 39 // See the corresponding .cpp file for a description of the specifics
duke@435 40 // for that implementation.
duke@435 41
duke@435 42 class Mutex;
jmasa@3730 43 template <class Chunk> class TreeList;
jmasa@3730 44 template <class Chunk> class PrintTreeCensusClosure;
duke@435 45
jmasa@3730 46 template <class Chunk>
duke@435 47 class FreeList VALUE_OBJ_CLASS_SPEC {
duke@435 48 friend class CompactibleFreeListSpace;
dcubed@587 49 friend class VMStructs;
jmasa@3730 50 friend class PrintTreeCensusClosure<Chunk>;
ysr@1580 51
ysr@1580 52 private:
jmasa@3730 53 Chunk* _head; // Head of list of free chunks
jmasa@3730 54 Chunk* _tail; // Tail of list of free chunks
ysr@1580 55 size_t _size; // Size in Heap words of each chunk
duke@435 56 ssize_t _count; // Number of entries in list
duke@435 57 size_t _hint; // next larger size list with a positive surplus
duke@435 58
ysr@1580 59 AllocationStats _allocation_stats; // allocation-related statistics
duke@435 60
duke@435 61 #ifdef ASSERT
duke@435 62 Mutex* _protecting_lock;
duke@435 63 #endif
duke@435 64
duke@435 65 // Asserts false if the protecting lock (if any) is not held.
duke@435 66 void assert_proper_lock_protection_work() const PRODUCT_RETURN;
duke@435 67 void assert_proper_lock_protection() const {
duke@435 68 #ifdef ASSERT
duke@435 69 if (_protecting_lock != NULL)
duke@435 70 assert_proper_lock_protection_work();
duke@435 71 #endif
duke@435 72 }
duke@435 73
duke@435 74 // Initialize the allocation statistics.
duke@435 75 protected:
ysr@1580 76 void init_statistics(bool split_birth = false);
duke@435 77 void set_count(ssize_t v) { _count = v;}
ysr@1580 78 void increment_count() {
ysr@1580 79 _count++;
ysr@1580 80 }
ysr@1580 81
duke@435 82 void decrement_count() {
duke@435 83 _count--;
ysr@447 84 assert(_count >= 0, "Count should not be negative");
ysr@447 85 }
duke@435 86
duke@435 87 public:
duke@435 88 // Constructor
duke@435 89 // Construct a list without any entries.
duke@435 90 FreeList();
duke@435 91 // Construct a list with "fc" as the first (and lone) entry in the list.
jmasa@3730 92 FreeList(Chunk* fc);
duke@435 93
duke@435 94 // Reset the head, tail, hint, and count of a free list.
duke@435 95 void reset(size_t hint);
duke@435 96
duke@435 97 // Declare the current free list to be protected by the given lock.
duke@435 98 #ifdef ASSERT
duke@435 99 void set_protecting_lock(Mutex* protecting_lock) {
duke@435 100 _protecting_lock = protecting_lock;
duke@435 101 }
duke@435 102 #endif
duke@435 103
duke@435 104 // Accessors.
jmasa@3730 105 Chunk* head() const {
duke@435 106 assert_proper_lock_protection();
duke@435 107 return _head;
duke@435 108 }
jmasa@3730 109 void set_head(Chunk* v) {
duke@435 110 assert_proper_lock_protection();
duke@435 111 _head = v;
duke@435 112 assert(!_head || _head->size() == _size, "bad chunk size");
duke@435 113 }
duke@435 114 // Set the head of the list and set the prev field of non-null
duke@435 115 // values to NULL.
jmasa@3730 116 void link_head(Chunk* v) {
duke@435 117 assert_proper_lock_protection();
duke@435 118 set_head(v);
duke@435 119 // If this method is not used (just set the head instead),
duke@435 120 // this check can be avoided.
duke@435 121 if (v != NULL) {
jmasa@3732 122 v->link_prev(NULL);
duke@435 123 }
duke@435 124 }
duke@435 125
jmasa@3730 126 Chunk* tail() const {
duke@435 127 assert_proper_lock_protection();
duke@435 128 return _tail;
duke@435 129 }
jmasa@3730 130 void set_tail(Chunk* v) {
duke@435 131 assert_proper_lock_protection();
duke@435 132 _tail = v;
duke@435 133 assert(!_tail || _tail->size() == _size, "bad chunk size");
duke@435 134 }
duke@435 135 // Set the tail of the list and set the next field of non-null
duke@435 136 // values to NULL.
jmasa@3730 137 void link_tail(Chunk* v) {
duke@435 138 assert_proper_lock_protection();
duke@435 139 set_tail(v);
duke@435 140 if (v != NULL) {
jmasa@3732 141 v->clear_next();
duke@435 142 }
duke@435 143 }
duke@435 144
duke@435 145 // No locking checks in read-accessors: lock-free reads (only) are benign.
duke@435 146 // Readers are expected to have the lock if they are doing work that
duke@435 147 // requires atomicity guarantees in sections of code.
duke@435 148 size_t size() const {
duke@435 149 return _size;
duke@435 150 }
duke@435 151 void set_size(size_t v) {
duke@435 152 assert_proper_lock_protection();
duke@435 153 _size = v;
duke@435 154 }
duke@435 155 ssize_t count() const {
duke@435 156 return _count;
duke@435 157 }
duke@435 158 size_t hint() const {
duke@435 159 return _hint;
duke@435 160 }
duke@435 161 void set_hint(size_t v) {
duke@435 162 assert_proper_lock_protection();
duke@435 163 assert(v == 0 || _size < v, "Bad hint"); _hint = v;
duke@435 164 }
duke@435 165
duke@435 166 // Accessors for statistics
duke@435 167 AllocationStats* allocation_stats() {
duke@435 168 assert_proper_lock_protection();
duke@435 169 return &_allocation_stats;
duke@435 170 }
duke@435 171
duke@435 172 ssize_t desired() const {
duke@435 173 return _allocation_stats.desired();
duke@435 174 }
ysr@447 175 void set_desired(ssize_t v) {
ysr@447 176 assert_proper_lock_protection();
ysr@447 177 _allocation_stats.set_desired(v);
ysr@447 178 }
duke@435 179 void compute_desired(float inter_sweep_current,
ysr@1580 180 float inter_sweep_estimate,
ysr@1580 181 float intra_sweep_estimate) {
duke@435 182 assert_proper_lock_protection();
duke@435 183 _allocation_stats.compute_desired(_count,
duke@435 184 inter_sweep_current,
ysr@1580 185 inter_sweep_estimate,
ysr@1580 186 intra_sweep_estimate);
duke@435 187 }
jmasa@3732 188 ssize_t coal_desired() const {
jmasa@3732 189 return _allocation_stats.coal_desired();
duke@435 190 }
jmasa@3732 191 void set_coal_desired(ssize_t v) {
duke@435 192 assert_proper_lock_protection();
jmasa@3732 193 _allocation_stats.set_coal_desired(v);
duke@435 194 }
duke@435 195
duke@435 196 ssize_t surplus() const {
duke@435 197 return _allocation_stats.surplus();
duke@435 198 }
duke@435 199 void set_surplus(ssize_t v) {
duke@435 200 assert_proper_lock_protection();
duke@435 201 _allocation_stats.set_surplus(v);
duke@435 202 }
duke@435 203 void increment_surplus() {
duke@435 204 assert_proper_lock_protection();
duke@435 205 _allocation_stats.increment_surplus();
duke@435 206 }
duke@435 207 void decrement_surplus() {
duke@435 208 assert_proper_lock_protection();
duke@435 209 _allocation_stats.decrement_surplus();
duke@435 210 }
duke@435 211
jmasa@3732 212 ssize_t bfr_surp() const {
jmasa@3732 213 return _allocation_stats.bfr_surp();
duke@435 214 }
jmasa@3732 215 void set_bfr_surp(ssize_t v) {
duke@435 216 assert_proper_lock_protection();
jmasa@3732 217 _allocation_stats.set_bfr_surp(v);
duke@435 218 }
jmasa@3732 219 ssize_t prev_sweep() const {
jmasa@3732 220 return _allocation_stats.prev_sweep();
duke@435 221 }
jmasa@3732 222 void set_prev_sweep(ssize_t v) {
duke@435 223 assert_proper_lock_protection();
jmasa@3732 224 _allocation_stats.set_prev_sweep(v);
duke@435 225 }
jmasa@3732 226 ssize_t before_sweep() const {
jmasa@3732 227 return _allocation_stats.before_sweep();
duke@435 228 }
jmasa@3732 229 void set_before_sweep(ssize_t v) {
duke@435 230 assert_proper_lock_protection();
jmasa@3732 231 _allocation_stats.set_before_sweep(v);
duke@435 232 }
duke@435 233
jmasa@3732 234 ssize_t coal_births() const {
jmasa@3732 235 return _allocation_stats.coal_births();
duke@435 236 }
jmasa@3732 237 void set_coal_births(ssize_t v) {
duke@435 238 assert_proper_lock_protection();
jmasa@3732 239 _allocation_stats.set_coal_births(v);
duke@435 240 }
jmasa@3732 241 void increment_coal_births() {
duke@435 242 assert_proper_lock_protection();
jmasa@3732 243 _allocation_stats.increment_coal_births();
duke@435 244 }
duke@435 245
jmasa@3732 246 ssize_t coal_deaths() const {
jmasa@3732 247 return _allocation_stats.coal_deaths();
duke@435 248 }
jmasa@3732 249 void set_coal_deaths(ssize_t v) {
duke@435 250 assert_proper_lock_protection();
jmasa@3732 251 _allocation_stats.set_coal_deaths(v);
duke@435 252 }
jmasa@3732 253 void increment_coal_deaths() {
duke@435 254 assert_proper_lock_protection();
jmasa@3732 255 _allocation_stats.increment_coal_deaths();
duke@435 256 }
duke@435 257
jmasa@3732 258 ssize_t split_births() const {
jmasa@3732 259 return _allocation_stats.split_births();
duke@435 260 }
jmasa@3732 261 void set_split_births(ssize_t v) {
duke@435 262 assert_proper_lock_protection();
jmasa@3732 263 _allocation_stats.set_split_births(v);
duke@435 264 }
jmasa@3732 265 void increment_split_births() {
duke@435 266 assert_proper_lock_protection();
jmasa@3732 267 _allocation_stats.increment_split_births();
duke@435 268 }
duke@435 269
jmasa@3732 270 ssize_t split_deaths() const {
jmasa@3732 271 return _allocation_stats.split_deaths();
duke@435 272 }
jmasa@3732 273 void set_split_deaths(ssize_t v) {
duke@435 274 assert_proper_lock_protection();
jmasa@3732 275 _allocation_stats.set_split_deaths(v);
duke@435 276 }
jmasa@3732 277 void increment_split_deaths() {
duke@435 278 assert_proper_lock_protection();
jmasa@3732 279 _allocation_stats.increment_split_deaths();
duke@435 280 }
duke@435 281
duke@435 282 NOT_PRODUCT(
jmasa@3732 283 // For debugging. The "_returned_bytes" in all the lists are summed
duke@435 284 // and compared with the total number of bytes swept during a
duke@435 285 // collection.
jmasa@3732 286 size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
jmasa@3732 287 void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
jmasa@3732 288 void increment_returned_bytes_by(size_t v) {
jmasa@3732 289 _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
duke@435 290 }
duke@435 291 )
duke@435 292
duke@435 293 // Unlink head of list and return it. Returns NULL if
duke@435 294 // the list is empty.
jmasa@3732 295 Chunk* get_chunk_at_head();
duke@435 296
duke@435 297 // Remove the first "n" or "count", whichever is smaller, chunks from the
duke@435 298 // list, setting "fl", which is required to be empty, to point to them.
jmasa@3730 299 void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
duke@435 300
duke@435 301 // Unlink this chunk from it's free list
jmasa@3732 302 void remove_chunk(Chunk* fc);
duke@435 303
duke@435 304 // Add this chunk to this free list.
jmasa@3732 305 void return_chunk_at_head(Chunk* fc);
jmasa@3732 306 void return_chunk_at_tail(Chunk* fc);
duke@435 307
duke@435 308 // Similar to returnChunk* but also records some diagnostic
duke@435 309 // information.
jmasa@3732 310 void return_chunk_at_head(Chunk* fc, bool record_return);
jmasa@3732 311 void return_chunk_at_tail(Chunk* fc, bool record_return);
duke@435 312
duke@435 313 // Prepend "fl" (whose size is required to be the same as that of "this")
duke@435 314 // to the front of "this" list.
jmasa@3730 315 void prepend(FreeList<Chunk>* fl);
duke@435 316
duke@435 317 // Verify that the chunk is in the list.
duke@435 318 // found. Return NULL if "fc" is not found.
jmasa@3732 319 bool verify_chunk_in_free_list(Chunk* fc) const;
ysr@447 320
ysr@1580 321 // Stats verification
ysr@1580 322 void verify_stats() const PRODUCT_RETURN;
ysr@1580 323
ysr@447 324 // Printing support
ysr@447 325 static void print_labels_on(outputStream* st, const char* c);
ysr@447 326 void print_on(outputStream* st, const char* c = NULL) const;
duke@435 327 };
stefank@2314 328
jmasa@3730 329 #endif // SHARE_VM_MEMORY_FREELIST_HPP

mercurial