Tue, 14 Jun 2011 11:01:10 -0700
7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC using objects' mark words to determine liveness. The third parameter of the heap verification routines, which was used in G1 to determine which marking bitmap to use in liveness calculations, has been changed from a boolean to an enum with values defined for using the mark word, and the 'prev' and 'next' bitmaps.
Reviewed-by: tonyp, ysr
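
For reference, a minimal sketch of the new option type described in the summary
above. The exact declaration lives elsewhere in this changeset, so the value
names below are reproduced on a best-effort basis and may differ in detail from
the source tree:

    enum VerifyOption {
      VerifyOption_Default = 0,

      // G1: which source of liveness information verification should use.
      VerifyOption_G1UsePrevMarking = VerifyOption_Default,               // 'prev' marking bitmap
      VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,  // 'next' marking bitmap
      VerifyOption_G1UseMarkWord    = VerifyOption_G1UseNextMarking + 1   // objects' mark words
    };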
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP

#include "gc_interface/gcCause.hpp"
#include "memory/allocation.hpp"
#include "memory/barrierSet.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
35 // A "CollectedHeap" is an implementation of a java heap for HotSpot. This
36 // is an abstract class: there may be many different kinds of heaps. This
37 // class defines the functions that a heap must implement, and contains
38 // infrastructure common to all heaps.
40 class BarrierSet;
41 class ThreadClosure;
42 class AdaptiveSizePolicy;
43 class Thread;
44 class CollectorPolicy;
46 //
47 // CollectedHeap
48 // SharedHeap
49 // GenCollectedHeap
50 // G1CollectedHeap
51 // ParallelScavengeHeap
52 //
class CollectedHeap : public CHeapObj {
  friend class VMStructs;
  friend class IsGCActiveMark;          // Block structured external access to _is_gc_active
  friend class constantPoolCacheKlass;  // allocate() method inserts is_conc_safe

#ifdef ASSERT
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used.
  bool _defer_initial_card_mark;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
  bool _is_gc_active;
  int _n_par_threads;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;
  // Constructor
  CollectedHeap();

  // Do common initializations that must follow instance construction,
  // for example, those needing virtual calls.
  // This code could perhaps be moved into initialize() but would
  // be slightly more awkward because we want the latter to be a
  // pure virtual.
  void pre_initialize();

  // Create a new TLAB.
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Accumulate statistics on all TLABs.
  virtual void accumulate_statistics_all_tlabs();

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();
 protected:
  // Allocate from the current thread's TLAB, with a broken-out slow path.
  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);

  // Allocates an uninitialized block of the given size, or returns NULL if
  // this is impossible.
  inline static HeapWord* common_mem_allocate_noinit(size_t size, bool is_noref, TRAPS);

  // Like common_mem_allocate_noinit, but the block returned by a successful
  // allocation is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, bool is_noref, TRAPS);

  // Same as the common_mem version, except memory is allocated in the permanent area.
  // If there is no permanent area, reverts to common_mem_allocate_noinit.
  inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);

  // Same as the common_mem version, except memory is allocated in the permanent area.
  // If there is no permanent area, reverts to common_mem_allocate_init.
  inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);
  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass,
                                                  HeapWord* obj, size_t size);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size);

  inline static void post_allocation_setup_obj(KlassHandle klass,
                                               HeapWord* obj, size_t size);

  inline static void post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj, size_t size,
                                                 int length);

  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  static inline size_t filler_array_max_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)
 public:
  enum Name {
    Abstract,
    SharedHeap,
    GenCollectedHeap,
    ParallelScavengeHeap,
    G1CollectedHeap
  };

  virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
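
  // Illustrative (hypothetical) use of kind(): code that must special-case a
  // particular heap type can dispatch on it, e.g.
  //
  //   if (Universe::heap()->kind() == CollectedHeap::G1CollectedHeap) {
  //     // G1-specific handling
  //   }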
  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize() = 0;

  MemRegion reserved_region() const { return _reserved; }
  address base() const { return (address)reserved_region().start(); }
  // Future cleanup here. The following functions should specify bytes or
  // heapwords as part of their signature.
  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  virtual size_t permanent_capacity() const = 0;
  virtual size_t permanent_used() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage (e.g.,
  // perm gen space or, in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  bool is_in_reserved_or_null(const void* p) const {
    return p == NULL || is_in_reserved(p);
  }

  // Returns "TRUE" if "p" points to the head of an allocated object in the
  // heap. Since this method can be expensive in general, we restrict its
  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }
  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  //
  // 2) is closed under reference: no object in the closed subset
  //    references one outside the closed subset.
  //
  // Membership in a heap's closed subset is useful for assertions.
  // Clearly, the entire heap is a closed subset, so the default
  // implementation is to use "is_in_reserved". But this may be too
  // liberal to perform useful checking. Also, the "is_in" predicate
  // defines a closed subset, but may be too expensive, since "is_in"
  // verifies that its argument points to an object head. The
  // "closed_subset" method allows a heap to define an intermediate
  // predicate, allowing more precise checking than "is_in_reserved" at
  // lower cost than "is_in."
  //
  // One important case is a heap composed of disjoint contiguous spaces,
  // such as the Garbage-First collector. Such heaps have a convenient
  // closed subset consisting of the allocated portions of those
  // contiguous spaces.

  // Return "TRUE" iff the given pointer points into the heap's defined
  // closed subset (which defaults to the entire heap).
  virtual bool is_in_closed_subset(const void* p) const {
    return is_in_reserved(p);
  }

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }
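
  // Illustrative sketch of the intended assertion-style use of the
  // closed-subset predicates (hypothetical call site):
  //
  //   assert(Universe::heap()->is_in_closed_subset_or_null(p),
  //          "pointer escapes the heap's closed subset");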
  // XXX is_permanent() and is_in_permanent() should be better named
  // to distinguish one from the other.

  // Returns "TRUE" if "p" is allocated as "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in_reserved() would return.
  // NOTE: this actually returns true if "p" is anywhere in the space
  // reserved for permanent data, not only if it is actually allocated
  // (i.e., in committed space). If you need the more conservative
  // answer, use is_permanent().
  virtual bool is_in_permanent(const void *p) const = 0;

#ifdef ASSERT
  // Returns true if "p" is in the part of the
  // heap being collected.
  virtual bool is_in_partial_collection(const void *p) = 0;
#endif

  bool is_in_permanent_or_null(const void *p) const {
    return p == NULL || is_in_permanent(p);
  }

  // Returns "TRUE" if "p" is in the committed area of "permanent" data.
  // If the heap does not use "permanent" data, returns the same
  // value is_in() would return.
  virtual bool is_permanent(const void *p) const = 0;

  bool is_permanent_or_null(const void *p) const {
    return p == NULL || is_permanent(p);
  }
  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
  bool is_valid_method(oop p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
      _perf_gc_cause->set_value(GCCause::to_string(v));
    }
    _gc_cause = v;
  }
  GCCause::Cause gc_cause() { return _gc_cause; }

  // Number of threads currently working on GC tasks.
  int n_par_threads() { return _n_par_threads; }

  // May be overridden to set additional parallelism.
  virtual void set_par_threads(int t) { _n_par_threads = t; }
  // Preload classes into the shared portion of the heap, and then dump
  // that data to a file so that it can be loaded directly by another
  // VM (then terminate).
  virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop large_typearray_allocate(KlassHandle klass, int size, int length, TRAPS);

  // Special obj/array allocation facilities.
  // Some heaps may want to manage "permanent" data uniquely. These default
  // to the general routines if the heap does not support such handling.
  inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
  // permanent_obj_allocate_no_klass_install() does not do the installation of
  // the klass pointer in the newly created object (as permanent_obj_allocate()
  // above does). This allows for a delay in the installation of the klass
  // pointer that is needed during the creation of klassKlasses. The
  // method post_allocation_install_obj_klass() is used to install the
  // klass pointer.
  inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                            int size,
                                                            TRAPS);
  inline static void post_allocation_install_obj_klass(KlassHandle klass,
                                                       oop obj,
                                                       int size);
  inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);
  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // The permanent allocation method should default to mem_allocate if
  // permanent memory isn't supported.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool is_noref,
                                 bool is_tlab,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
  virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
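
  // Illustrative sketch: a heap retiring a partially used buffer can plug the
  // unused tail [top, end) with a dummy object so the region stays walkable
  // (hypothetical call site; 'top' and 'end' are assumed local variables):
  //
  //   CollectedHeap::fill_with_object(top, end);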
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
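
  // For illustration, the fast path that compiled code is expected to inline
  // against these two addresses is, in effect (a sketch, ignoring the
  // atomicity that real generated code must provide):
  //
  //   HeapWord* top = *heap->top_addr();
  //   if (top + size <= *heap->end_addr()) {
  //     *heap->top_addr() = top + size;  // bump the allocation pointer
  //     return top;                      // 'size' words allocated at 'top'
  //   }
  //   // else: fall back to the slow-path allocation routine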
  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See the implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
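
  // Illustrative override pattern for a hypothetical subclass 'MyHeap',
  // per the NOTE above:
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs); // common TLAB work
  //     // ... heap-specific work to make the heap walkable ...
  //   }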
  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity. In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc() = 0;

  // Section on thread-local allocation buffers (TLABs)
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
  // The default is "no".
  virtual bool supports_tlab_allocation() const {
    return false;
  }
  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }
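
  // Illustrative sketch: a heap that supports TLABs would override the three
  // methods above along these lines (hypothetical subclass 'MyHeap';
  // young_gen_capacity() and young_gen_free() are assumed helpers):
  //
  //   bool   MyHeap::supports_tlab_allocation() const         { return true; }
  //   size_t MyHeap::tlab_capacity(Thread* thr) const         { return young_gen_capacity(); }
  //   size_t MyHeap::unsafe_max_tlab_alloc(Thread* thr) const { return young_gen_free(); }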
  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided.
  virtual bool can_elide_tlab_store_barriers() const = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // there is probably a corresponding slow path which can produce
  // an object allocated anywhere. The compiler's runtime support
  // promises to call this function on such a slow-path-allocated
  // object before performing initializations that have elided
  // store barriers. Returns new_obj, or maybe a safer copy thereof.
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);

  // Answers whether an initializing store to a new object currently
  // allocated at the given address doesn't need a store
  // barrier. Returns "true" if it doesn't need an initializing
  // store barrier; answers "false" if it does.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;

  // If a compiler is eliding store barriers for TLAB-allocated objects,
  // we will be informed of a slow-path allocation by a call
  // to new_store_pre_barrier() above. Such a call precedes the
  // initialization of the object itself, and no post-store-barriers will
  // be issued. Some heap types require that the barrier strictly follows
  // the initializing stores. (This is currently implemented by deferring the
  // barrier until the next slow-path allocation or gc-related safepoint.)
  // This interface answers whether a particular heap type needs the card
  // mark to be thus strictly sequenced after the stores.
  virtual bool card_mark_must_follow_store() const = 0;

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);
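
  // For illustration, the protocol sketched by the comments above is,
  // in sequence (hypothetical compiler-runtime pseudo-code):
  //
  //   oop obj = heap->new_store_pre_barrier(thread, raw_obj); // before init stores
  //   // ... initializing stores to obj, with no card marks ...
  //   // later, at the next slow-path allocation or GC-related safepoint:
  //   heap->flush_deferred_store_barrier(thread);             // emit deferred card mark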
  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const = 0;

  // Does this heap support heap inspection (+PrintClassHistogram)?
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }
  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the AdaptiveSizePolicy for the heap.
  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;
  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each. This includes objects in permanent memory.
  virtual void oop_iterate(OopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

  // Behaves the same as oop_iterate, except only traverses
  // interior pointers contained in permanent memory. If there
  // is no permanent memory, does nothing.
  virtual void permanent_oop_iterate(OopClosure* cl) = 0;

  // Behaves the same as object_iterate, except only traverses
  // objects contained in permanent memory. If there is no
  // permanent memory, does nothing.
  virtual void permanent_object_iterate(ObjectClosure* cl) = 0;
  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a block may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns its size.
  // "addr + size" is required to be the start of a new block, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
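
  // Illustrative sketch of walking a parsable range [bottom, end) with the
  // block interface (hypothetical code; 'bottom' and 'end' are assumed
  // local variables):
  //
  //   HeapWord* p = bottom;
  //   while (p < end) {
  //     if (block_is_obj(p)) {
  //       // 'p' is the head of a Java object; process it here.
  //     }
  //     p += block_size(p);  // advance to the next block
  //   }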
  // Returns the longest time (in ms) that has elapsed since the last
  // time that any part of the heap was examined by a garbage collection.
  virtual jlong millis_since_last_gc() = 0;

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Generate any dumps preceding or following a full gc.
  void pre_full_gc_dump();
  void post_full_gc_dump();

  virtual void print() const = 0;
  virtual void print_on(outputStream* st) const = 0;

  // Print all GC threads (other than the VM thread)
  // used by this heap.
  virtual void print_gc_threads_on(outputStream* st) const = 0;
  void print_gc_threads() { print_gc_threads_on(tty); }
  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // Heap verification
  virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
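
  // Illustrative (hypothetical) call, per this changeset's summary: after the
  // marking phase of G1's full GC, liveness can be verified from mark words:
  //
  //   Universe::heap()->verify(/* allow_dirty */ true, /* silent */ false,
  //                            VerifyOption_G1UseMarkWord);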
  // Non product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot. Return true if it's time to cause a
  // promotion failure. The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  inline bool promotion_should_fail(volatile size_t* count);
  inline bool promotion_should_fail();

  // Reset the PromotionFailureALot counters. Should be called at the end of a
  // GC in which promotion failure occurred.
  inline void reset_promotion_should_fail(volatile size_t* count);
  inline void reset_promotion_should_fail();
#endif // #ifndef PRODUCT

#ifdef ASSERT
  static int fired_fake_oom() {
    return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
  }
#endif
 public:
  // This is a convenience method that is used in cases where
  // the actual number of GC worker threads is not pertinent but
  // only whether there are more than 0. Use of this method helps
  // reduce the occurrence of ParallelGCThreads to uses where the
  // actual number may be germane.
  static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
};
// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    assert(SafepointSynchronize::is_at_safepoint(),
           "This method manipulates heap state without locking");
    _heap->set_gc_cause(_previous_cause);
  }
};
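
// Illustrative (hypothetical) use: set the cause for the duration of a
// collection, restoring the previous cause when the setter goes out of scope:
//
//   {
//     GCCauseSetter gccs(heap, GCCause::_java_lang_system_gc);
//     // ... perform the collection; heap->gc_cause() now reports the cause ...
//   }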
#endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP