src/share/vm/gc_interface/collectedHeap.hpp

author:      brutisso
date:        Mon, 16 Apr 2012 08:57:18 +0200
changeset:   3711 b632e80fc9dc
parent:      3675 9a9bb0010c91
child:       3900 d2a62e0f25eb
permissions: -rw-r--r--

4988100: oop_verify_old_oop appears to be dead
Summary: removed oop_verify_old_oop and allow_dirty. Also reviewed by: alexlamsl@gmail.com
Reviewed-by: jmasa, jwilhelm

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP

#include "gc_interface/gcCause.hpp"
#include "memory/allocation.hpp"
#include "memory/barrierSet.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/events.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class BarrierSet;
class ThreadClosure;
class AdaptiveSizePolicy;
class Thread;
class CollectorPolicy;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

 public:
  GCMessage() {}
};

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {}

  void log_heap_before() {
    log_heap(true);
  }
  void log_heap_after() {
    log_heap(false);
  }
};

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  GCHeapLog* _gc_heap_log;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
  bool _defer_initial_card_mark;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  uint _n_par_threads;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new TLAB.  All TLAB allocations must go through this.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all TLABs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Allocate from the current thread's TLAB, with broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
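
  // Illustrative sketch (not part of the original interface): the fast and
  // slow paths above compose roughly as follows; the real code lives in
  // collectedHeap.inline.hpp and may differ in detail:
  //
  //   HeapWord* obj = thread->tlab().allocate(size);  // inlined fast path
  //   if (obj == NULL) {
  //     // Slow path: may refill the TLAB via allocate_new_tlab(), or
  //     // give up on TLAB allocation for this request.
  //     obj = allocate_from_tlab_slow(thread, size);
  //   }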

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);

  // Same as the common_mem version, except memory is allocated in the permanent area.
  // If there is no permanent area, revert to common_mem_allocate_noinit.
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as the common_mem version, except memory is allocated in the permanent area.
  // If there is no permanent area, revert to common_mem_allocate_init.
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr);

  inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }

  // Future cleanup here.  The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;
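
  // For example (sketch, not part of this interface): the VM entry backing
  // java.lang.Runtime.maxMemory() reduces to something like
  //
  //   jlong max = (jlong) Universe::heap()->max_capacity();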

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved".  But this may be too
  // liberal to perform useful checking.  Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head.  The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."

  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector.  Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
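
  // These predicates are primarily assertion fodder; a typical use
  // (sketch) is:
  //
  //   assert(Universe::heap()->is_in_reserved(obj),
  //          "object must be within the reserved heap");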

  // XXX is_permanent() and is_in_permanent() should be better named
  // to distinguish one from the other.

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is in the space reserved
  // for the permanent data, not that it is actually allocated (i.e.
  // in committed space).  If you need the more conservative answer use
  // is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

#ifdef ASSERT
  // Returns true if "p" is in the part of the
  // heap being collected.
  virtual bool is_in_partial_collection(const void *p) = 0;
#endif

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_permanent_or_null(const void *p) const {
    return p == NULL || is_permanent(p);
  }

  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability.  This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Number of threads currently working on GC tasks.
  uint n_par_threads() { return _n_par_threads; }

  // May be overridden to set additional parallelism.
  virtual void set_par_threads(uint t) { _n_par_threads = t; }

  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // Allocate and initialize instances of Class.
  static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely.  These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does).  This allows for a delay in the installation of the klass
  // pointer that is needed during the creation of klassKlasses.  The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass, oop obj);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.  mem_allocate() should never be
  // called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
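
  // Sketch of a typical use (simplified; the real logic lives in the TLAB
  // and GC code): when a TLAB is retired, its unused tail is plugged with a
  // filler object so that the heap remains parsable:
  //
  //   HeapWord* top      = tlab_top;       // first free word
  //   HeapWord* hard_end = tlab_hard_end;  // end of the buffer
  //   if (top < hard_end) {
  //     CollectedHeap::fill_with_object(top, hard_end);  // zaps in debug builds
  //   }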

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
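
  // Sketch of the fast path these exports enable: compiled code performs
  // the moral equivalent of the following CAS attempt against *top_addr()
  // (simplified; a single attempt shown):
  //
  //   HeapWord* old_top = *heap->top_addr();
  //   HeapWord* new_top = old_top + size;
  //   if (new_top <= *heap->end_addr() &&
  //       Atomic::cmpxchg_ptr(new_top, heap->top_addr(), old_top) == old_top) {
  //     return old_top;  // success; object will occupy [old_top, new_top)
  //   }
  //   // else: fall back to the slow path (mem_allocate, possibly a GC)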

  // Some heaps may be in an unparsable state at certain times between
  // collections.  This may be necessary for efficient implementation of
  // certain allocation-related activities.  Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability).  It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done.  See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
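
  // Typical caller (sketch): a safepoint operation that is about to walk
  // the heap makes it parsable first, e.g.
  //
  //   assert(SafepointSynchronize::is_at_safepoint(), "world must be stopped");
  //   Universe::heap()->ensure_parsability(false /* retire_tlabs */);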

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity.  In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.  If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere.  The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers.  Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address doesn't need a store
  // barrier.  Returns "true" if it doesn't need an initializing
  // store barrier; answers "false" if it does.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above.  Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued.  Some heap types require that the barrier strictly follows
  // the initializing stores.  (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);
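
  // Putting the protocol above together, the compiler runtime's slow path
  // behaves roughly like this (sketch; not a verbatim excerpt):
  //
  //   if (heap->can_elide_tlab_store_barriers()) {
  //     new_obj = heap->new_store_pre_barrier(thread, new_obj);
  //     // ... initializing stores to new_obj, with no card marks ...
  //     // If card_mark_must_follow_store(), any deferred card mark is
  //     // flushed later via flush_deferred_store_barrier(thread).
  //   }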

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram?)
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread.  It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.  This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory.  If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // objects contained in permanent memory.  If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

  // NOTE!  There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
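
  // A heap walker built on the block interface looks roughly like this
  // (sketch; "bottom" and "top" bound the area being walked):
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (block_is_obj(cur)) {
  //       do_something(oop(cur));
  //     }
  //     cur += block_size(cur);
  //   }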

  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc
  void pre_full_gc_dump();
  void post_full_gc_dump();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const {
    print_on(tty);
  }
  // Print more detailed heap information on the given
  // outputStream.  The default behavior is to call print_on().  It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  // The default behavior is to call print_gc_threads_on() on tty.
  void print_gc_threads() {
    print_gc_threads_on(tty);
  }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // If PrintHeapAtGC is set, call the appropriate routines.
  void print_heap_before_gc() {
    if (PrintHeapAtGC) {
      Universe::print_heap_before_gc();
    }
    if (_gc_heap_log != NULL) {
      _gc_heap_log->log_heap_before();
    }
  }
  void print_heap_after_gc() {
    if (PrintHeapAtGC) {
      Universe::print_heap_after_gc();
    }
    if (_gc_heap_log != NULL) {
      _gc_heap_log->log_heap_after();
    }
  }

  // Heap verification
  virtual void verify(bool silent, VerifyOption option) = 0;

  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif

 public:
  // This is a convenience method that is used in cases where
  // the actual number of GC worker threads is not pertinent but
  // only whether there are more than 0.  Use of this method helps
  // reduce direct uses of ParallelGCThreads to the places where the
  // actual number is germane.
  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }

  /////////////// Unit tests ///////////////

  NOT_PRODUCT(static void test_is_in();)
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
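
// Typical use (sketch; VM_ExampleCollect is a hypothetical VM operation):
//
//   void VM_ExampleCollect::doit() {
//     GCCauseSetter gcs(Universe::heap(), GCCause::_java_lang_system_gc);
//     // ... perform the collection; PerfData and logs now report the cause ...
//   }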

#endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
