src/share/vm/gc_interface/collectedHeap.hpp

author:      xdono
date:        Mon, 09 Mar 2009 13:28:46 -0700
changeset:   1014:0fbdb4381b99
parent:      977:9a25e0c45327
child:       1063:7bb995fbd3c0
permissions: -rw-r--r--

6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A "CollectedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class BarrierSet;
class ThreadClosure;
class AdaptiveSizePolicy;
class Thread;

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark;         // Block-structured external access to _is_gc_active
  friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for the current garbage collection. Between collections it
  // should be set to a value indicating that no collection is in progress.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new tlab
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Fix up tlabs to make the heap well-formed again,
  // optionally retiring the tlabs.
  virtual void fill_all_tlabs(bool retire);

  // Accumulate statistics on all tlabs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

 protected:
  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed to be initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);

  // Same as the common_mem version, except memory is allocated in the permanent area.
  // If there is no permanent area, reverts to common_mem_allocate_noinit.
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as the common_mem version, except memory is allocated in the permanent area.
  // If there is no permanent area, reverts to common_mem_allocate_init.
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  static inline size_t filler_array_max_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;
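
  // For illustration only (not part of this interface): a caller in VM
  // startup code would typically propagate the status, along the lines of
  //
  //   jint status = heap->initialize();
  //   if (status != JNI_OK) return status;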

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" if "p" points to the head of an allocated object in the
  // heap. Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved". But this may be too
  // liberal to perform useful checking. Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head. The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector. Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
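
  // For illustration only: the closed-subset predicates above are meant
  // for assertion checking, e.g. (hypothetical caller):
  //
  //   assert(Universe::heap()->is_in_closed_subset_or_null(obj),
  //          "reference escapes the heap's closed subset");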

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is anywhere in the space
  // reserved for permanent data, not only if it is actually allocated
  // (i.e., in committed space). If you need the more conservative
  // answer, use is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely. These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does). This allows for a delay in the installation of the klass
  // pointer that is needed during the creation of klassKlass objects. The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                       oop obj,
                                                       int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words);

  static void fill_with_object(HeapWord* start, size_t words);
  static void fill_with_object(MemRegion region) {
    fill_with_object(region.start(), region.word_size());
  }
  static void fill_with_object(HeapWord* start, HeapWord* end) {
    fill_with_object(start, pointer_delta(end, start));
  }
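
  // For illustration only: a typical use is making a retired region of the
  // heap walkable again (hypothetical caller; "top" and "end" are assumed
  // to delimit the region):
  //
  //   if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_objects(top, pointer_delta(end, top));
  //   }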

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region).

  // This function returns "true" iff the heap supports this kind of
  // allocation. (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
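
  // For illustration only: given the exported addresses above, a compiled
  // fast path amounts to the following pseudocode (assuming the heap
  // returns true from supports_inline_contig_alloc()):
  //
  //   HeapWord* top = *top_addr();
  //   if (top + size <= *end_addr()) {
  //     // try to CAS *top_addr() from top to top + size;
  //     // on success, the new object occupies [top, top + size)
  //   }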

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See the implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
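
  // For illustration only, the override pattern described in the NOTE above
  // ("MyHeap" is a hypothetical subclass):
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs); // fill/retire TLABs
  //     // ... then make any heap-specific regions parsable ...
  //   }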

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity. In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
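
  // For illustration only: a heap that supports TLABs would override the
  // three methods above, e.g. ("MyHeap", young_gen_capacity() and
  // eden_free() are hypothetical):
  //
  //   virtual bool supports_tlab_allocation() const { return true; }
  //   virtual size_t tlab_capacity(Thread* thr) const {
  //     return young_gen_capacity();
  //   }
  //   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const {
  //     return eden_free();
  //   }
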
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere. The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram)?
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each. This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory. If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // objects contained in permanent memory. If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "block" instead of "object" since some heaps
  // may not pack objects densely; a block may be either an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns its size.
  // "addr + size" is required to be the start of a new block, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
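
  // For illustration only: the block interface enables heap walks of the
  // form below (hypothetical caller; assumes the region walked is parsable
  // and that [bottom, top) spans whole blocks):
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (heap->block_is_obj(cur)) {
  //       // cur is the start of an object; examine oop(cur)
  //     }
  //     cur += heap->block_size(cur);
  //   }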

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  virtual void print() const = 0;
  virtual void print_on(outputStream* st) const = 0;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  void print_gc_threads() { print_gc_threads_on(tty); }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // Heap verification
  virtual void verify(bool allow_dirty, bool silent) = 0;

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot. Return true if it's time to cause a
  // promotion failure. The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters. Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
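
// For illustration only: a typical use inside a VM operation executed at a
// safepoint ("VM_MyCollect" is hypothetical):
//
//   void VM_MyCollect::doit() {
//     GCCauseSetter gccs(Universe::heap(), GCCause::_java_lang_system_gc);
//     Universe::heap()->collect_as_vm_thread(GCCause::_java_lang_system_gc);
//   } // destructor restores the previous cause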
