src/share/vm/gc_interface/collectedHeap.hpp

author:       johnc
date:         Tue, 14 Jun 2011 11:01:10 -0700
changeset:    2969:6747fd0512e0
parent:       2909:2aa9ddbb9e60
child:        2971:c9ca3f51cf41
permissions:  -rw-r--r--

7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC using objects' mark words to determine liveness. The third parameter of the heap verification routines, which was used in G1 to determine which marking bitmap to use in liveness calculations, has been changed from a boolean to an enum with values defined for using the mark word, and the 'prev' and 'next' bitmaps.
Reviewed-by: tonyp, ysr

duke@435 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
stefank@2314 26 #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
stefank@2314 27
stefank@2314 28 #include "gc_interface/gcCause.hpp"
stefank@2314 29 #include "memory/allocation.hpp"
stefank@2314 30 #include "memory/barrierSet.hpp"
stefank@2314 31 #include "runtime/handles.hpp"
stefank@2314 32 #include "runtime/perfData.hpp"
stefank@2314 33 #include "runtime/safepoint.hpp"
stefank@2314 34
duke@435 35 // A "CollectedHeap" is an implementation of a java heap for HotSpot. This
duke@435 36 // is an abstract class: there may be many different kinds of heaps. This
duke@435 37 // class defines the functions that a heap must implement, and contains
duke@435 38 // infrastructure common to all heaps.
duke@435 39
duke@435 40 class BarrierSet;
duke@435 41 class ThreadClosure;
duke@435 42 class AdaptiveSizePolicy;
duke@435 43 class Thread;
jmasa@1822 44 class CollectorPolicy;
duke@435 45
duke@435 46 //
duke@435 47 // CollectedHeap
duke@435 48 //   SharedHeap
duke@435 49 //     GenCollectedHeap
duke@435 50 //     G1CollectedHeap
duke@435 51 //   ParallelScavengeHeap
duke@435 52 //
duke@435 53 class CollectedHeap : public CHeapObj {
duke@435 54 friend class VMStructs;
duke@435 55 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
jmasa@977 56 friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe
duke@435 57
duke@435 58 #ifdef ASSERT
duke@435 59 static int _fire_out_of_memory_count;
duke@435 60 #endif
duke@435 61
jcoomes@916 62 // Used for filler objects (static, but initialized in ctor).
jcoomes@916 63 static size_t _filler_array_max_size;
jcoomes@916 64
ysr@1601 65 // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
ysr@1601 66 bool _defer_initial_card_mark;
ysr@1601 67
duke@435 68 protected:
duke@435 69 MemRegion _reserved;
duke@435 70 BarrierSet* _barrier_set;
duke@435 71 bool _is_gc_active;
jmasa@2188 72 int _n_par_threads;
jmasa@2188 73
duke@435 74 unsigned int _total_collections; // ... started
duke@435 75 unsigned int _total_full_collections; // ... started
duke@435 76 NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
duke@435 77 NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
duke@435 78
duke@435 79 // Reason for current garbage collection. Should be set to
duke@435 80 // a value reflecting no collection between collections.
duke@435 81 GCCause::Cause _gc_cause;
duke@435 82 GCCause::Cause _gc_lastcause;
duke@435 83 PerfStringVariable* _perf_gc_cause;
duke@435 84 PerfStringVariable* _perf_gc_lastcause;
duke@435 85
duke@435 86 // Constructor
duke@435 87 CollectedHeap();
duke@435 88
ysr@1601 89 // Do common initializations that must follow instance construction,
ysr@1601 90 // for example, those needing virtual calls.
ysr@1601 91 // This code could perhaps be moved into initialize() but would
ysr@1601 92 // be slightly more awkward because we want the latter to be a
ysr@1601 93 // pure virtual.
ysr@1601 94 void pre_initialize();
ysr@1601 95
duke@435 96 // Create a new tlab
duke@435 97 virtual HeapWord* allocate_new_tlab(size_t size);
duke@435 98
duke@435 99 // Accumulate statistics on all tlabs.
duke@435 100 virtual void accumulate_statistics_all_tlabs();
duke@435 101
duke@435 102 // Reinitialize tlabs before resuming mutators.
duke@435 103 virtual void resize_all_tlabs();
duke@435 104
duke@435 105 protected:
duke@435 106 // Allocate from the current thread's TLAB, with broken-out slow path.
duke@435 107 inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
duke@435 108 static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
duke@435 109
duke@435 110 // Allocate an uninitialized block of the given size, or returns NULL if
duke@435 111 // this is impossible.
duke@435 112 inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);
duke@435 113
duke@435 114 // Like common_mem_allocate_noinit, but the block returned by a successful
duke@435 115 // allocation is guaranteed initialized to zeros.
duke@435 116 inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);
duke@435 117
duke@435 118 // Same as common_mem version, except memory is allocated in the permanent area
duke@435 119 // If there is no permanent area, revert to common_mem_allocate_noinit
duke@435 120 inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);
duke@435 121
duke@435 122 // Same as common_mem version, except memory is allocated in the permanent area
duke@435 123 // If there is no permanent area, revert to common_mem_allocate_init
duke@435 124 inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);
duke@435 125
duke@435 126 // Helper functions for (VM) allocation.
duke@435 127 inline static void post_allocation_setup_common(KlassHandle klass,
duke@435 128 HeapWord* obj, size_t size);
duke@435 129 inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
duke@435 130 HeapWord* objPtr,
duke@435 131 size_t size);
duke@435 132
duke@435 133 inline static void post_allocation_setup_obj(KlassHandle klass,
duke@435 134 HeapWord* obj, size_t size);
duke@435 135
duke@435 136 inline static void post_allocation_setup_array(KlassHandle klass,
duke@435 137 HeapWord* obj, size_t size,
duke@435 138 int length);
duke@435 139
duke@435 140 // Clears an allocated object.
duke@435 141 inline static void init_obj(HeapWord* obj, size_t size);
duke@435 142
jcoomes@916 143 // Filler object utilities.
jcoomes@916 144 static inline size_t filler_array_hdr_size();
jcoomes@916 145 static inline size_t filler_array_min_size();
jcoomes@916 146 static inline size_t filler_array_max_size();
jcoomes@916 147
jcoomes@916 148 DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
johnc@1600 149 DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
jcoomes@916 150
jcoomes@916 151 // Fill with a single array; caller must ensure filler_array_min_size() <=
jcoomes@916 152 // words <= filler_array_max_size().
johnc@1600 153 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
jcoomes@916 154
jcoomes@916 155 // Fill with a single object (either an int array or a java.lang.Object).
johnc@1600 156 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
jcoomes@916 157
duke@435 158 // Verification functions
duke@435 159 virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
duke@435 160 PRODUCT_RETURN;
duke@435 161 virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
duke@435 162 PRODUCT_RETURN;
jmasa@977 163 debug_only(static void check_for_valid_allocation_state();)
duke@435 164
duke@435 165 public:
duke@435 166 enum Name {
duke@435 167 Abstract,
duke@435 168 SharedHeap,
duke@435 169 GenCollectedHeap,
duke@435 170 ParallelScavengeHeap,
duke@435 171 G1CollectedHeap
duke@435 172 };
duke@435 173
duke@435 174 virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
duke@435 175
duke@435 176 /**
duke@435 177 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
duke@435 178 * and JNI_OK on success.
duke@435 179 */
duke@435 180 virtual jint initialize() = 0;
duke@435 181
duke@435 182 // In many heaps, there will be a need to perform some initialization activities
duke@435 183 // after the Universe is fully formed, but before general heap allocation is allowed.
duke@435 184 // This is the correct place to place such initialization methods.
duke@435 185 virtual void post_initialize() = 0;
duke@435 186
duke@435 187 MemRegion reserved_region() const { return _reserved; }
coleenp@548 188 address base() const { return (address)reserved_region().start(); }
duke@435 189
duke@435 190 // Future cleanup here. The following functions should specify bytes or
duke@435 191 // heapwords as part of their signature.
duke@435 192 virtual size_t capacity() const = 0;
duke@435 193 virtual size_t used() const = 0;
duke@435 194
duke@435 195 // Return "true" if the part of the heap that allocates Java
duke@435 196 // objects has reached the maximal committed limit that it can
duke@435 197 // reach, without a garbage collection.
duke@435 198 virtual bool is_maximal_no_gc() const = 0;
duke@435 199
duke@435 200 virtual size_t permanent_capacity() const = 0;
duke@435 201 virtual size_t permanent_used() const = 0;
duke@435 202
duke@435 203 // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
duke@435 204 // memory that the vm could make available for storing 'normal' java objects.
duke@435 205 // This is based on the reserved address space, but should not include space
duke@435 206 // that the vm uses internally for bookkeeping or temporary storage (e.g.,
duke@435 207 // perm gen space or, in the case of the young gen, one of the survivor
duke@435 208 // spaces).
duke@435 209 virtual size_t max_capacity() const = 0;
duke@435 210
duke@435 211 // Returns "TRUE" if "p" points into the reserved area of the heap.
duke@435 212 bool is_in_reserved(const void* p) const {
duke@435 213 return _reserved.contains(p);
duke@435 214 }
duke@435 215
duke@435 216 bool is_in_reserved_or_null(const void* p) const {
duke@435 217 return p == NULL || is_in_reserved(p);
duke@435 218 }
duke@435 219
duke@435 220 // Returns "TRUE" if "p" points to the head of an allocated object in the
duke@435 221 // heap. Since this method can be expensive in general, we restrict its
duke@435 222 // use to assertion checking only.
duke@435 223 virtual bool is_in(const void* p) const = 0;
duke@435 224
duke@435 225 bool is_in_or_null(const void* p) const {
duke@435 226 return p == NULL || is_in(p);
duke@435 227 }
duke@435 228
duke@435 229 // Let's define some terms: a "closed" subset of a heap is one that
duke@435 230 //
duke@435 231 // 1) contains all currently-allocated objects, and
duke@435 232 //
duke@435 233 // 2) is closed under reference: no object in the closed subset
duke@435 234 // references one outside the closed subset.
duke@435 235 //
duke@435 236 // Membership in a heap's closed subset is useful for assertions.
duke@435 237 // Clearly, the entire heap is a closed subset, so the default
duke@435 238 // implementation is to use "is_in_reserved". But this may be too
duke@435 239 // liberal to perform useful checking. Also, the "is_in" predicate
duke@435 240 // defines a closed subset, but may be too expensive, since "is_in"
duke@435 241 // verifies that its argument points to an object head. The
duke@435 242 // "closed_subset" method allows a heap to define an intermediate
duke@435 243 // predicate, allowing more precise checking than "is_in_reserved" at
duke@435 244 // lower cost than "is_in."
duke@435 245
duke@435 246 // One important case is a heap composed of disjoint contiguous spaces,
duke@435 247 // such as the Garbage-First collector. Such heaps have a convenient
duke@435 248 // closed subset consisting of the allocated portions of those
duke@435 249 // contiguous spaces.
duke@435 250
duke@435 251 // Return "TRUE" iff the given pointer points into the heap's defined
duke@435 252 // closed subset (which defaults to the entire heap).
duke@435 253 virtual bool is_in_closed_subset(const void* p) const {
duke@435 254 return is_in_reserved(p);
duke@435 255 }
duke@435 256
duke@435 257 bool is_in_closed_subset_or_null(const void* p) const {
duke@435 258 return p == NULL || is_in_closed_subset(p);
duke@435 259 }
duke@435 260
ysr@1376 261 // XXX is_permanent() and is_in_permanent() should be better named
ysr@1376 262 // to distinguish one from the other.
ysr@1376 263
duke@435 264 // Returns "TRUE" if "p" is allocated as "permanent" data.
duke@435 265 // If the heap does not use "permanent" data, returns the same
duke@435 266 // value is_in_reserved() would return.
duke@435 267 // NOTE: this actually returns true if "p" is in the reserved space
duke@435 268 // for the permanent area, not that it is actually allocated (i.e., in
duke@435 269 // committed space). If you need the more conservative answer, use is_permanent().
duke@435 270 virtual bool is_in_permanent(const void *p) const = 0;
duke@435 271
jmasa@2909 272
jmasa@2909 273 #ifdef ASSERT
jmasa@2909 274 // Returns true if "p" is in the part of the
jmasa@2909 275 // heap being collected.
jmasa@2909 276 virtual bool is_in_partial_collection(const void *p) = 0;
jmasa@2909 277 #endif
jmasa@2909 278
ysr@1376 279 bool is_in_permanent_or_null(const void *p) const {
ysr@1376 280 return p == NULL || is_in_permanent(p);
ysr@1376 281 }
ysr@1376 282
duke@435 283 // Returns "TRUE" if "p" is in the committed area of "permanent" data.
duke@435 284 // If the heap does not use "permanent" data, returns the same
duke@435 285 // value is_in() would return.
duke@435 286 virtual bool is_permanent(const void *p) const = 0;
duke@435 287
ysr@1376 288 bool is_permanent_or_null(const void *p) const {
ysr@1376 289 return p == NULL || is_permanent(p);
duke@435 290 }
duke@435 291
jrose@1424 292 // An object is scavengable if its location may move during a scavenge.
jrose@1424 293 // (A scavenge is a GC which is not a full GC.)
jmasa@2909 294 virtual bool is_scavengable(const void *p) = 0;
jrose@1424 295
duke@435 296 // Returns "TRUE" if "p" is a method oop in the
duke@435 297 // current heap, with high probability. This predicate
duke@435 298 // is not stable, in general.
duke@435 299 bool is_valid_method(oop p) const;
duke@435 300
duke@435 301 void set_gc_cause(GCCause::Cause v) {
duke@435 302 if (UsePerfData) {
duke@435 303 _gc_lastcause = _gc_cause;
duke@435 304 _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
duke@435 305 _perf_gc_cause->set_value(GCCause::to_string(v));
duke@435 306 }
duke@435 307 _gc_cause = v;
duke@435 308 }
duke@435 309 GCCause::Cause gc_cause() { return _gc_cause; }
duke@435 310
jmasa@2188 311 // Number of threads currently working on GC tasks.
jmasa@2188 312 int n_par_threads() { return _n_par_threads; }
jmasa@2188 313
jmasa@2188 314 // May be overridden to set additional parallelism.
jmasa@2188 315 virtual void set_par_threads(int t) { _n_par_threads = t; };
jmasa@2188 316
duke@435 317 // Preload classes into the shared portion of the heap, and then dump
duke@435 318 // that data to a file so that it can be loaded directly by another
duke@435 319 // VM (then terminate).
duke@435 320 virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }
duke@435 321
duke@435 322 // General obj/array allocation facilities.
duke@435 323 inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
duke@435 324 inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
duke@435 325 inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);
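// Illustrative sketch (not part of the original header): these covers are
// what klass-level allocation code is expected to call. A simplified
// instance-allocation path might look roughly like the following
// (error handling elided; names other than obj_allocate are assumptions):
//
//   oop allocate_instance_example(KlassHandle klass, TRAPS) {
//     int size = instanceKlass::cast(klass())->size_helper();  // in words
//     return CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
//   }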
duke@435 326
duke@435 327 // Special obj/array allocation facilities.
duke@435 328 // Some heaps may want to manage "permanent" data uniquely. These default
duke@435 329 // to the general routines if the heap does not support such handling.
duke@435 330 inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
duke@435 331 // permanent_obj_allocate_no_klass_install() does not do the installation of
duke@435 332 // the klass pointer in the newly created object (as permanent_obj_allocate()
duke@435 333 // above does). This allows for a delay in the installation of the klass
duke@435 334 // pointer that is needed during the creation of klassKlass's. The
duke@435 335 // method post_allocation_install_obj_klass() is used to install the
duke@435 336 // klass pointer.
duke@435 337 inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
duke@435 338 int size,
duke@435 339 TRAPS);
duke@435 340 inline static void post_allocation_install_obj_klass(KlassHandle klass,
duke@435 341 oop obj,
duke@435 342 int size);
duke@435 343 inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);
duke@435 344
duke@435 345 // Raw memory allocation facilities
duke@435 346 // The obj and array allocate methods are covers for these methods.
duke@435 347 // The permanent allocation method should default to mem_allocate if
duke@435 348 // permanent memory isn't supported.
duke@435 349 virtual HeapWord* mem_allocate(size_t size,
duke@435 350 bool is_noref,
duke@435 351 bool is_tlab,
duke@435 352 bool* gc_overhead_limit_was_exceeded) = 0;
duke@435 353 virtual HeapWord* permanent_mem_allocate(size_t size) = 0;
duke@435 354
duke@435 355 // The boundary between a "large" and "small" array of primitives, in words.
duke@435 356 virtual size_t large_typearray_limit() = 0;
duke@435 357
jcoomes@916 358 // Utilities for turning raw memory into filler objects.
jcoomes@916 359 //
jcoomes@916 360 // min_fill_size() is the smallest region that can be filled.
jcoomes@916 361 // fill_with_objects() can fill arbitrary-sized regions of the heap using
jcoomes@916 362 // multiple objects. fill_with_object() is for regions known to be smaller
jcoomes@916 363 // than the largest array of integers; it uses a single object to fill the
jcoomes@916 364 // region and has slightly less overhead.
jcoomes@916 365 static size_t min_fill_size() {
jcoomes@916 366 return size_t(align_object_size(oopDesc::header_size()));
jcoomes@916 367 }
jcoomes@916 368
johnc@1600 369 static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
jcoomes@916 370
johnc@1600 371 static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
johnc@1600 372 static void fill_with_object(MemRegion region, bool zap = true) {
johnc@1600 373 fill_with_object(region.start(), region.word_size(), zap);
jcoomes@916 374 }
johnc@1600 375 static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
johnc@1600 376 fill_with_object(start, pointer_delta(end, start), zap);
jcoomes@916 377 }
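// Illustrative sketch (not part of the original header): a typical use of
// the filler utilities is plugging the unused tail of a region so that the
// heap stays parseable, e.g. when retiring a buffer (hypothetical code):
//
//   void retire_buffer_example(HeapWord* top, HeapWord* end) {
//     if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
//       CollectedHeap::fill_with_object(top, end);  // one filler object
//     }
//   }
//
// fill_with_objects() would be used instead when the gap may exceed the
// largest filler array that fill_with_object() can create.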
jcoomes@916 378
duke@435 379 // Some heaps may offer a contiguous region for shared non-blocking
duke@435 380 // allocation, via inlined code (by exporting the address of the top and
duke@435 381 // end fields defining the extent of the contiguous allocation region.)
duke@435 382
duke@435 383 // This function returns "true" iff the heap supports this kind of
duke@435 384 // allocation. (Default is "no".)
duke@435 385 virtual bool supports_inline_contig_alloc() const {
duke@435 386 return false;
duke@435 387 }
duke@435 388 // These functions return the addresses of the fields that define the
duke@435 389 // boundaries of the contiguous allocation area. (These fields should be
duke@435 390 // physically near to one another.)
duke@435 391 virtual HeapWord** top_addr() const {
duke@435 392 guarantee(false, "inline contiguous allocation not supported");
duke@435 393 return NULL;
duke@435 394 }
duke@435 395 virtual HeapWord** end_addr() const {
duke@435 396 guarantee(false, "inline contiguous allocation not supported");
duke@435 397 return NULL;
duke@435 398 }
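// Illustrative sketch (not part of the original header): when
// supports_inline_contig_alloc() answers true, generated code can
// bump-allocate against the exported top/end fields. The fast path is
// conceptually equivalent to the following (hypothetical code; the real
// path is emitted as assembly and falls back to the slow path on failure):
//
//   HeapWord* inline_alloc_example(CollectedHeap* heap, size_t size) {
//     HeapWord* volatile* top_p = (HeapWord* volatile*) heap->top_addr();
//     HeapWord** end_p = heap->end_addr();
//     while (true) {
//       HeapWord* old_top = *top_p;
//       HeapWord* new_top = old_top + size;
//       if (new_top > *end_p) return NULL;           // take the slow path
//       if ((HeapWord*) Atomic::cmpxchg_ptr(new_top, top_p, old_top) == old_top) {
//         return old_top;                            // allocation succeeded
//       }
//     }
//   }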
duke@435 399
duke@435 400 // Some heaps may be in an unparseable state at certain times between
duke@435 401 // collections. This may be necessary for efficient implementation of
duke@435 402 // certain allocation-related activities. Calling this function before
duke@435 403 // attempting to parse a heap ensures that the heap is in a parsable
duke@435 404 // state (provided other concurrent activity does not introduce
duke@435 405 // unparsability). It is normally expected, therefore, that this
duke@435 406 // method is invoked with the world stopped.
duke@435 407 // NOTE: if you override this method, make sure you call
duke@435 408 // super::ensure_parsability so that the non-generational
duke@435 409 // part of the work gets done. See implementation of
duke@435 410 // CollectedHeap::ensure_parsability and, for instance,
duke@435 411 // that of GenCollectedHeap::ensure_parsability().
duke@435 412 // The argument "retire_tlabs" controls whether existing TLABs
duke@435 413 // are merely filled or also retired, thus preventing further
duke@435 414 // allocation from them and necessitating allocation of new TLABs.
duke@435 415 virtual void ensure_parsability(bool retire_tlabs);
duke@435 416
duke@435 417 // Return an estimate of the maximum allocation that could be performed
duke@435 418 // without triggering any collection or expansion activity. In a
duke@435 419 // generational collector, for example, this is probably the largest
duke@435 420 // allocation that could be supported (without expansion) in the youngest
duke@435 421 // generation. It is "unsafe" because no locks are taken; the result
duke@435 422 // should be treated as an approximation, not a guarantee, for use in
duke@435 423 // heuristic resizing decisions.
duke@435 424 virtual size_t unsafe_max_alloc() = 0;
duke@435 425
duke@435 426 // Section on thread-local allocation buffers (TLABs)
duke@435 427 // If the heap supports thread-local allocation buffers, it should override
duke@435 428 // the following methods:
duke@435 429 // Returns "true" iff the heap supports thread-local allocation buffers.
duke@435 430 // The default is "no".
duke@435 431 virtual bool supports_tlab_allocation() const {
duke@435 432 return false;
duke@435 433 }
duke@435 434 // The amount of space available for thread-local allocation buffers.
duke@435 435 virtual size_t tlab_capacity(Thread *thr) const {
duke@435 436 guarantee(false, "thread-local allocation buffers not supported");
duke@435 437 return 0;
duke@435 438 }
duke@435 439 // An estimate of the maximum allocation that could be performed
duke@435 440 // for thread-local allocation buffers without triggering any
duke@435 441 // collection or expansion activity.
duke@435 442 virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
duke@435 443 guarantee(false, "thread-local allocation buffers not supported");
duke@435 444 return 0;
duke@435 445 }
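// Illustrative sketch (not part of the original header): the common TLAB
// fast path (see allocate_from_tlab/_slow above) is, conceptually,
// "allocate from the current thread's TLAB, else take the shared slow
// path" (simplified; hypothetical free-standing function):
//
//   HeapWord* tlab_alloc_example(Thread* thread, size_t size) {
//     HeapWord* obj = thread->tlab().allocate(size);  // bump pointer, no lock
//     if (obj != NULL) return obj;
//     return CollectedHeap::allocate_from_tlab_slow(thread, size);
//   }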
ysr@1462 446
duke@435 447 // Can a compiler initialize a new object without store barriers?
duke@435 448 // This permission only extends from the creation of a new object
ysr@1462 449 // via a TLAB up to the first subsequent safepoint. If such permission
ysr@1462 450 // is granted for this heap type, the compiler promises to call
ysr@1462 451 // new_store_pre_barrier() below on any slow path allocation of
ysr@1462 452 // a new object for which such initializing store barriers will
ysr@1462 453 // have been elided.
ysr@777 454 virtual bool can_elide_tlab_store_barriers() const = 0;
ysr@777 455
duke@435 456 // If a compiler is eliding store barriers for TLAB-allocated objects,
duke@435 457 // there is probably a corresponding slow path which can produce
duke@435 458 // an object allocated anywhere. The compiler's runtime support
duke@435 459 // promises to call this function on such a slow-path-allocated
duke@435 460 // object before performing initializations that have elided
ysr@1462 461 // store barriers. Returns new_obj, or maybe a safer copy thereof.
ysr@1601 462 virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
ysr@1462 463
ysr@1462 464 // Answers whether an initializing store to a new object currently
ysr@1601 465 // allocated at the given address doesn't need a store
ysr@1462 466 // barrier. Returns "true" if it doesn't need an initializing
ysr@1462 467 // store barrier; answers "false" if it does.
ysr@1462 468 virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
ysr@1462 469
ysr@1601 470 // If a compiler is eliding store barriers for TLAB-allocated objects,
ysr@1601 471 // we will be informed of a slow-path allocation by a call
ysr@1601 472 // to new_store_pre_barrier() above. Such a call precedes the
ysr@1601 473 // initialization of the object itself, and no post-store-barriers will
ysr@1601 474 // be issued. Some heap types require that the barrier strictly follows
ysr@1601 475 // the initializing stores. (This is currently implemented by deferring the
ysr@1601 476 // barrier until the next slow-path allocation or gc-related safepoint.)
ysr@1601 477 // This interface answers whether a particular heap type needs the card
ysr@1601 478 // mark to be thus strictly sequenced after the stores.
ysr@1601 479 virtual bool card_mark_must_follow_store() const = 0;
ysr@1601 480
ysr@1462 481 // If the CollectedHeap was asked to defer a store barrier above,
ysr@1462 482 // this informs it to flush such a deferred store barrier to the
ysr@1462 483 // remembered set.
ysr@1462 484 virtual void flush_deferred_store_barrier(JavaThread* thread);
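// Illustrative sketch (not part of the original header): the intended
// call sequence between the compiler's runtime support and the heap, for
// an allocation whose initializing stores had their barriers elided, is
// roughly as follows (hypothetical driver code):
//
//   oop slow_path_alloc_example(JavaThread* thread, oop new_obj) {
//     CollectedHeap* heap = Universe::heap();
//     if (heap->can_elide_tlab_store_barriers() &&
//         !heap->can_elide_initializing_store_barrier(new_obj)) {
//       // Tell the heap before the barrier-less initializing stores run;
//       // the heap may defer the card mark until a later safepoint.
//       new_obj = heap->new_store_pre_barrier(thread, new_obj);
//     }
//     return new_obj;
//   }
//
// Heaps for which card_mark_must_follow_store() answers true rely on this
// deferral (and a later flush_deferred_store_barrier()) rather than on an
// immediate card mark.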
duke@435 485
duke@435 486 // Can a compiler elide a store barrier when it writes
duke@435 487 // a permanent oop into the heap? Applies when the compiler
duke@435 488 // is storing x to the heap, where x->is_perm() is true.
ysr@777 489 virtual bool can_elide_permanent_oop_store_barriers() const = 0;
duke@435 490
duke@435 491 // Does this heap support heap inspection (+PrintClassHistogram?)
ysr@777 492 virtual bool supports_heap_inspection() const = 0;
duke@435 493
duke@435 494 // Perform a collection of the heap; intended for use in implementing
duke@435 495 // "System.gc". This probably implies as full a collection as the
duke@435 496 // "CollectedHeap" supports.
duke@435 497 virtual void collect(GCCause::Cause cause) = 0;
duke@435 498
duke@435 499 // This interface assumes that it's being called by the
duke@435 500 // vm thread. It collects the heap assuming that the
duke@435 501 // heap lock is already held and that we are executing in
duke@435 502 // the context of the vm thread.
duke@435 503 virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;
duke@435 504
duke@435 505 // Returns the barrier set for this heap
duke@435 506 BarrierSet* barrier_set() { return _barrier_set; }
duke@435 507
duke@435 508 // Returns "true" iff there is a stop-world GC in progress. (I assume
duke@435 509 // that it should answer "false" for the concurrent part of a concurrent
duke@435 510 // collector -- dld).
duke@435 511 bool is_gc_active() const { return _is_gc_active; }
duke@435 512
duke@435 513 // Total number of GC collections (started)
duke@435 514 unsigned int total_collections() const { return _total_collections; }
duke@435 515 unsigned int total_full_collections() const { return _total_full_collections;}
duke@435 516
duke@435 517 // Increment total number of GC collections (started)
duke@435 518 // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
duke@435 519 void increment_total_collections(bool full = false) {
duke@435 520 _total_collections++;
duke@435 521 if (full) {
duke@435 522 increment_total_full_collections();
duke@435 523 }
duke@435 524 }
duke@435 525
duke@435 526 void increment_total_full_collections() { _total_full_collections++; }
duke@435 527
duke@435 528 // Return the AdaptiveSizePolicy for the heap.
duke@435 529 virtual AdaptiveSizePolicy* size_policy() = 0;
duke@435 530
jmasa@1822 531 // Return the CollectorPolicy for the heap
jmasa@1822 532 virtual CollectorPolicy* collector_policy() const = 0;
jmasa@1822 533
duke@435 534 // Iterate over all the ref-containing fields of all objects, calling
duke@435 535 // "cl.do_oop" on each. This includes objects in permanent memory.
duke@435 536 virtual void oop_iterate(OopClosure* cl) = 0;
duke@435 537
duke@435 538 // Iterate over all objects, calling "cl.do_object" on each.
duke@435 539 // This includes objects in permanent memory.
duke@435 540 virtual void object_iterate(ObjectClosure* cl) = 0;
duke@435 541
jmasa@952 542 // Similar to object_iterate() except iterates only
jmasa@952 543 // over live objects.
jmasa@952 544 virtual void safe_object_iterate(ObjectClosure* cl) = 0;
jmasa@952 545
duke@435 546 // Behaves the same as oop_iterate, except only traverses
duke@435 547 // interior pointers contained in permanent memory. If there
duke@435 548 // is no permanent memory, does nothing.
duke@435 549 virtual void permanent_oop_iterate(OopClosure* cl) = 0;
duke@435 550
duke@435 551 // Behaves the same as object_iterate, except only traverses
duke@435 552 // objects contained in permanent memory. If there is no
duke@435 553 // permanent memory, does nothing.
duke@435 554 virtual void permanent_object_iterate(ObjectClosure* cl) = 0;
duke@435 555
duke@435 556 // NOTE! There is no requirement that a collector implement these
duke@435 557 // functions.
duke@435 558 //
duke@435 559 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
duke@435 560 // each address in the (reserved) heap is a member of exactly
duke@435 561 // one block. The defining characteristic of a block is that it is
duke@435 562 // possible to find its size, and thus to progress forward to the next
duke@435 563 // block. (Blocks may be of different sizes.) Thus, blocks may
duke@435 564 // represent Java objects, or they might be free blocks in a
duke@435 565 // free-list-based heap (or subheap), as long as the two kinds are
duke@435 566 // distinguishable and the size of each is determinable.
duke@435 567
duke@435 568 // Returns the address of the start of the "block" that contains the
duke@435 569 // address "addr". We say "blocks" instead of "object" since some heaps
duke@435 570 // may not pack objects densely; a chunk may either be an object or a
duke@435 571 // non-object.
duke@435 572 virtual HeapWord* block_start(const void* addr) const = 0;
duke@435 573
duke@435 574 // Requires "addr" to be the start of a chunk, and returns its size.
duke@435 575 // "addr + size" is required to be the start of a new chunk, or the end
duke@435 576 // of the active area of the heap.
duke@435 577 virtual size_t block_size(const HeapWord* addr) const = 0;
duke@435 578
duke@435 579 // Requires "addr" to be the start of a block, and returns "TRUE" iff
duke@435 580 // the block is an object.
duke@435 581 virtual bool block_is_obj(const HeapWord* addr) const = 0;
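// Illustrative sketch (not part of the original header): a heap that
// implements the block interface can be walked linearly, block by block,
// once it has been made parseable via ensure_parsability() at a safepoint
// (hypothetical code):
//
//   void walk_blocks_example(CollectedHeap* heap, MemRegion mr) {
//     HeapWord* p = heap->block_start(mr.start());
//     while (p < mr.end()) {
//       if (heap->block_is_obj(p)) {
//         oop(p)->print();             // or any per-object processing
//       }
//       p += heap->block_size(p);      // advance to the next block
//     }
//   }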
duke@435 582
duke@435 583 // Returns the longest time (in ms) that has elapsed since the last
duke@435 584 // time that any part of the heap was examined by a garbage collection.
duke@435 585 virtual jlong millis_since_last_gc() = 0;
duke@435 586
duke@435 587 // Perform any cleanup actions necessary before allowing a verification.
duke@435 588 virtual void prepare_for_verify() = 0;
duke@435 589
ysr@1050 590 // Generate any dumps preceding or following a full gc
ysr@1050 591 void pre_full_gc_dump();
ysr@1050 592 void post_full_gc_dump();
ysr@1050 593
duke@435 594 virtual void print() const = 0;
duke@435 595 virtual void print_on(outputStream* st) const = 0;
duke@435 596
duke@435 597 // Print all GC threads (other than the VM thread)
duke@435 598 // used by this heap.
duke@435 599 virtual void print_gc_threads_on(outputStream* st) const = 0;
duke@435 600 void print_gc_threads() { print_gc_threads_on(tty); }
duke@435 601 // Iterator for all GC threads (other than VM thread)
duke@435 602 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
duke@435 603
duke@435 604 // Print any relevant tracing info that flags imply.
duke@435 605 // Default implementation does nothing.
duke@435 606 virtual void print_tracing_info() const = 0;
duke@435 607
duke@435 608 // Heap verification
johnc@2969 609 virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
duke@435 610
duke@435 611 // Non product verification and debugging.
duke@435 612 #ifndef PRODUCT
duke@435 613 // Support for PromotionFailureALot. Return true if it's time to cause a
duke@435 614 // promotion failure. The no-argument version uses
duke@435 615 // this->_promotion_failure_alot_count as the counter.
duke@435 616 inline bool promotion_should_fail(volatile size_t* count);
duke@435 617 inline bool promotion_should_fail();
duke@435 618
duke@435 619 // Reset the PromotionFailureALot counters. Should be called at the end of a
duke@435 620 // GC in which promotion failure occurred.
duke@435 621 inline void reset_promotion_should_fail(volatile size_t* count);
duke@435 622 inline void reset_promotion_should_fail();
duke@435 623 #endif // #ifndef PRODUCT
duke@435 624
duke@435 625 #ifdef ASSERT
duke@435 626 static int fired_fake_oom() {
duke@435 627 return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
duke@435 628 }
duke@435 629 #endif
jmasa@2188 630
jmasa@2188 631 public:
jmasa@2188 632 // This is a convenience method that is used in cases where
jmasa@2188 633 // the actual number of GC worker threads is not pertinent but
jmasa@2188 634 // only whether there are more than 0. Use of this method helps
jmasa@2188 635 // reduce direct uses of ParallelGCThreads to places where the
jmasa@2188 636 // actual number may be germane.
jmasa@2188 637 static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
duke@435 638 };
duke@435 639
duke@435 640 // Class to set and reset the GC cause for a CollectedHeap.
duke@435 641
duke@435 642 class GCCauseSetter : StackObj {
duke@435 643 CollectedHeap* _heap;
duke@435 644 GCCause::Cause _previous_cause;
duke@435 645 public:
duke@435 646 GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
duke@435 647 assert(SafepointSynchronize::is_at_safepoint(),
duke@435 648 "This method manipulates heap state without locking");
duke@435 649 _heap = heap;
duke@435 650 _previous_cause = _heap->gc_cause();
duke@435 651 _heap->set_gc_cause(cause);
duke@435 652 }
duke@435 653
duke@435 654 ~GCCauseSetter() {
duke@435 655 assert(SafepointSynchronize::is_at_safepoint(),
duke@435 656 "This method manipulates heap state without locking");
duke@435 657 _heap->set_gc_cause(_previous_cause);
duke@435 658 }
duke@435 659 };
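// Illustrative sketch (not part of the original header): GCCauseSetter is
// meant to be used as a stack object by a VM operation, so the cause is
// recorded for the duration of the collection and the previous cause is
// restored afterwards, along the lines of (hypothetical code):
//
//   void do_full_collection_example(CollectedHeap* heap) {
//     assert(SafepointSynchronize::is_at_safepoint(), "VM thread only");
//     GCCauseSetter gccs(heap, GCCause::_java_lang_system_gc);
//     heap->collect_as_vm_thread(GCCause::_java_lang_system_gc);
//   }  // destructor restores the previous cause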
stefank@2314 660
stefank@2314 661 #endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
