/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                       - abstract base class
// - DefNewGeneration               - allocation area (copy collected)
//   - ParNewGeneration             - a DefNewGeneration that is collected by
//                                    several threads
// - CardGeneration                 - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration          - tenured (old object) space (markSweepCompact)
//     - CompactingPermGenGen       - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Shenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
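// Illustrative sketch, not part of this interface: a consumer of a scratch
// list (such as one built up by Generation::contribute_scratch, declared
// below) might walk it as follows; "use_scratch" is a hypothetical callback.
//
//   for (ScratchBlock* b = list; b != NULL; b = b->next) {
//     // The block's usable region is "b->num_words" words starting at the
//     // address of "b" itself; the two header fields overlay its first words.
//     use_scratch((HeapWord*)b, b->num_words);
//   }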

class Generation: public CHeapObj {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;       // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region;  // for collectors that want to "remember" a value for
                                // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    LogOfGenGrain = 16,
    GenGrain = 1 << LogOfGenGrain
  };

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.
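  // Illustrative sketch, not part of this interface: since generations are
  // GenGrain-aligned with sizes that are multiples of GenGrain (64K with
  // LogOfGenGrain = 16), sizing code can round a request up with the usual
  // power-of-two arithmetic:
  //
  //   size_t align_up_to_gen_grain(size_t bytes) {
  //     return (bytes + Generation::GenGrain - 1) &
  //            ~((size_t)Generation::GenGrain - 1);
  //   }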
  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount can
  // be attempted safely (without a vm failure).
  // Promotion of the full amount is not guaranteed but
  // can be attempted.
  // "younger_handles_promotion_failure" is true if the younger
  // generation handles a promotion failure.
  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into an allocated object in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvms, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a sub
  // class of DefNewGeneration and return a DefNewGeneration*
  DefNewGeneration* as_DefNewGeneration();
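  // Illustrative sketch, not part of this interface: a collection policy
  // deciding whether a scavenge may proceed could combine the space
  // enquiries above roughly as follows ("young" and "old" are hypothetical
  // Generation pointers):
  //
  //   bool scavenge_is_safe =
  //     old->promotion_attempt_is_safe(young->used(),  // worst case: all survive
  //                                    true /* younger handles failure */);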
  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.
  // The caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }
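  // Illustrative sketch, not part of this interface: inline contiguous
  // allocation against the exported top/end fields amounts to a CAS-bump of
  // the top pointer ("cas_ptr" stands in for the real atomic primitive):
  //
  //   HeapWord** top = gen->top_addr();
  //   HeapWord** end = gen->end_addr();
  //   HeapWord* old_top = *top;
  //   HeapWord* new_top = old_top + word_size;
  //   if (new_top <= *end && cas_ptr(top, old_top, new_top) == old_top) {
  //     // success: [old_top, new_top) is the newly allocated block
  //   }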
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  //
  // The "ref" argument, if non-NULL, is the address of some reference to "obj"
  // (that is, "*ref == obj"); some generations may use this information to, for
  // example, influence placement decisions.
  //
  // The default implementation ignores "ref" and calls allocate().
  virtual oop promote(oop obj, size_t obj_size, oop* ref);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }
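  // Illustrative sketch, not part of this interface: a young collector
  // copying a survivor upward would use promote() roughly like this
  // ("next_gen" is a hypothetical older-generation pointer):
  //
  //   size_t sz = obj->size();                    // computed once, passed along
  //   oop copy = next_gen->promote(obj, sz, ref);
  //   if (copy == NULL) {
  //     // promotion failed; the caller must fall back to its
  //     // promotion-failure handling.
  //   }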
  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {}

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // XXX See note in genCollectedHeap::millis_since_last_gc()
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: " INT64_FORMAT " to " INT64_FORMAT,
                _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }
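  // Illustrative sketch, not part of this interface: the collection driver
  // is expected to consult the hooks above in roughly this order (the
  // locals are hypothetical):
  //
  //   gen->gc_prologue(full);
  //   if (gen->should_collect(full, word_size, is_tlab)) {
  //     gen->collect(full, clear_all_soft_refs, word_size, is_tlab);
  //   }
  //   gen->gc_epilogue(full);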
  // Generations may keep statistics about collection.  This
  // method updates those statistics.  "current_level" is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() { ShouldNotReachHere(); }
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)    \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);             \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
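  // Illustrative sketch, not part of this interface: per the contract above,
  // a single iteration also scans objects allocated while it runs, so the
  // typical pattern is:
  //
  //   gen->save_marks();                         // note allocation point
  //   ... copy some live objects into "gen" ...
  //   gen->oop_since_save_marks_iterate_v(&cl);  // re-saves marks at the end
  //   assert(gen->no_allocs_since_save_marks(), "iteration should be complete");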
  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the generation.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated in the generation since the last
  // collection, calling "cl.do_object" on each.  The generation must have
  // been initialized properly to support this function, or else this call
  // will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
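  // Illustrative sketch, not part of this interface: a minimal ObjectClosure
  // usable with object_iterate() ("CountClosure" is hypothetical):
  //
  //   class CountClosure : public ObjectClosure {
  //    public:
  //     size_t count;
  //     CountClosure() : count(0) {}
  //     void do_object(oop obj) { count++; }
  //   };
  //
  //   CountClosure cc;
  //   gen->object_iterate(&cc);  // cc.count now holds the object total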
  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;


  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify(bool allow_dirty) = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();
};
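// Illustrative sketch, not part of these interfaces: the two remembered-set
// notifications are issued at different points (hypothetical driver code):
//
//   // After a full collection leaves every younger generation empty,
//   // no old-to-young pointers can remain:
//   old_gen->clear_remembered_set();
//
//   // After compacting the generation's own spaces, the recorded card
//   // information no longer matches the moved objects:
//   old_gen->invalidate_remembered_set();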

// OneContigSpaceCardGeneration models a heap of old objects contained in a
// single contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t _min_heap_delta_bytes;  // Minimum amount to expand.
  ContiguousSpace* _the_space;   // actual space holding objects
  WaterMark _last_gc;            // watermark between objects allocated before
                                 // and after last GC.

  // Grow generation by the specified number of bytes (returns false if
  // unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();
  // Shrink generation by the specified number of bytes.
  void shrink_by(size_t bytes);

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)     \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void verify(bool allow_dirty);
  virtual void print_on(outputStream* st) const;
};
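// Illustrative sketch, not part of this interface: on allocation failure a
// OneContigSpaceCardGeneration expands by at least the requested amount,
// bounded below by the configured minimum delta; a caller inside the class
// might compute the request as follows (the policy shown is an assumption,
// not the actual implementation):
//
//   size_t byte_size    = word_size * HeapWordSize;
//   size_t expand_bytes = MAX2(byte_size, _min_heap_delta_bytes);
//   expand(byte_size, expand_bytes);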