/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENERATION_HPP
#define SHARE_VM_MEMORY_GENERATION_HPP

#include "gc_implementation/shared/collectorCounters.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.hpp"
#include "memory/watermark.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
#include "runtime/virtualspace.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                       - abstract base class
// - DefNewGeneration               - allocation area (copy collected)
//   - ParNewGeneration             - a DefNewGeneration that is collected by
//                                    several threads
// - CardGeneration                 - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration - abstract class holding a single
//                                    contiguous space with card marking
//     - TenuredGeneration          - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration
//
//   ParNewGeneration + TenuredGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
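
// Illustrative sketch (not compiled): how a consumer might walk a scratch
// list and total the usable payload of each block.  The helper below is
// hypothetical, not part of this interface; the "- 2" reflects the two
// leading word-sized header fields described above.
#if 0
static size_t total_scratch_words(ScratchBlock* list) {
  size_t total = 0;
  for (ScratchBlock* b = list; b != NULL; b = b->next) {
    // "num_words" counts the whole block, including the two header words,
    // so the usable payload is num_words - 2.
    total += b->num_words - 2;
  }
  return total;
}
#endif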

class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;      // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.
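
  // Illustrative sketch (not compiled): a monitoring helper built only on
  // the three pure virtual space enquiries above.  The helper name is
  // hypothetical; gclog_or_tty, SIZE_FORMAT and K are the usual HotSpot
  // logging facilities.
#if 0
  static void print_gen_occupancy(Generation* gen) {
    // For most generations, used() + free() accounts for capacity().
    gclog_or_tty->print_cr("%s: " SIZE_FORMAT "K used, " SIZE_FORMAT "K free, "
                           SIZE_FORMAT "K capacity",
                           gen->name(), gen->used() / K, gen->free() / K,
                           gen->capacity() / K);
  }
#endif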

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }
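
  // Illustrative sketch (not compiled): the intended call pattern for
  // promotion_attempt_is_safe().  Before collecting a young generation, a
  // collector asks the next older generation whether promoting, in the
  // worst case, everything the young generation currently holds is likely
  // to succeed.  "young" and "old" are hypothetical locals.
#if 0
  bool minor_gc_attempt_is_safe(Generation* young, Generation* old) {
    // Worst case, every used byte in the young generation is promoted.
    return old->promotion_attempt_is_safe(young->used());
  }
#endif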

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a sub
  // class of DefNewGeneration and return a DefNewGeneration*
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.
  // The caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments *top_addr() with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
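
  // Illustrative sketch (not compiled): the lock-free allocation style that
  // supports_inline_contig_alloc() advertises.  Compiled code loads
  // *top_addr() and *end_addr() and bumps the top with a CAS; the loop
  // below is a hypothetical runtime-side equivalent, assuming HotSpot's
  // Atomic::cmpxchg_ptr.
#if 0
  static HeapWord* inline_contig_allocate(Generation* gen, size_t word_size) {
    assert(gen->supports_inline_contig_alloc(), "precondition");
    HeapWord** top_p = gen->top_addr();
    HeapWord** end_p = gen->end_addr();
    while (true) {
      HeapWord* old_top = *top_p;
      HeapWord* new_top = old_top + word_size;
      if (new_top > *end_p) {
        return NULL;  // Does not fit; fall back to a slow-path allocate().
      }
      // Race with other allocating threads: only one CAS can win.
      HeapWord* result =
        (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_p, old_top);
      if (result == old_top) {
        return old_top;
      }
    }
  }
#endif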

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
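
  // Illustrative sketch (not compiled): how a single-threaded young-gen
  // copying collector might use promote() for an object that should leave
  // the young generation.  "should_promote" and "copy_within_young" are
  // hypothetical policy and fallback helpers, not part of this interface.
#if 0
  oop copy_or_promote(Generation* old_gen, oop obj) {
    size_t sz = obj->size();
    if (should_promote(obj)) {                  // hypothetical policy check
      oop new_obj = old_gen->promote(obj, sz);  // NULL means promotion failed
      if (new_obj != NULL) {
        return new_obj;
      }
      // Promotion failed; the caller must cope (see
      // promotion_failure_occurred() above).
    }
    return copy_within_young(obj, sz);          // hypothetical fallback
  }
#endif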

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;
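
  // Illustrative sketch (not compiled): the should_collect / collect /
  // expand_and_allocate protocol from a caller's point of view, loosely
  // modeled on a GenCollectedHeap-style driver.  The function name is
  // hypothetical.
#if 0
  HeapWord* satisfy_allocation(Generation* gen, size_t word_size, bool is_tlab) {
    if (gen->should_collect(false /* full */, word_size, is_tlab)) {
      gen->collect(false /* full */, false /* clear_all_soft_refs */,
                   word_size, is_tlab);
      HeapWord* result = gen->allocate(word_size, is_tlab);
      if (result != NULL) {
        return result;
      }
    }
    // Collection did not free enough space: try expanding the generation.
    return gen->expand_and_allocate(word_size, is_tlab);
  }
#endif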

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source.  So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, (int64_t)_time_of_last_gc, (int64_t)now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function returns "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)

#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)    \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);             \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
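
  // Illustrative sketch (not compiled): the "saved marks" iteration
  // protocol described above.  Objects allocated after save_marks() are
  // exactly the ones visited, and the iteration re-saves marks when it
  // finishes; "scan_cl" is a hypothetical OopsInGenClosure.
#if 0
  void scan_newly_allocated(Generation* gen, OopsInGenClosure* scan_cl) {
    gen->save_marks();
    // ... allocation happens in "gen" (e.g., promotion during a GC) ...
    if (!gen->no_allocs_since_save_marks()) {
      // Visits ref fields of objects allocated since save_marks(),
      // including objects allocated while the closure runs, then calls
      // save_marks() again at the end of the iteration.
      gen->oop_since_save_marks_iterate_v(scan_cl);
    }
  }
#endif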

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {};
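
  // Illustrative sketch (not compiled): how a requestor might gather
  // scratch space from every generation before a collection, per the
  // contribute_scratch() contract above.  The "n_gens"/"get_gen" accessors
  // are hypothetical, loosely modeled on a GenCollectedHeap-style heap.
#if 0
  ScratchBlock* gather_scratch_example(GenCollectedHeap* gch,
                                       Generation* requestor,
                                       size_t max_alloc_words) {
    ScratchBlock* list = NULL;
    for (int i = 0; i < gch->n_gens(); i++) {  // hypothetical accessors
      // Each generation may prepend zero or more blocks to "list".
      gch->get_gen(i)->contribute_scratch(list, requestor, max_alloc_words);
    }
    return list;
  }
#endif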

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
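
  // Illustrative sketch (not compiled): walking a contiguous generation's
  // used region with the block abstraction above.  Assumes the region is
  // parsable (see ensure_parsability()) and that blocks start at the
  // beginning of the used region; the counting helper is hypothetical.
#if 0
  size_t count_objects(Generation* gen) {
    size_t count = 0;
    MemRegion mr = gen->used_region();
    HeapWord* p = mr.start();
    while (p < mr.end()) {
      if (gen->block_is_obj(p)) {
        count++;                  // "p" is the start of an object.
      }
      p += gen->block_size(p);    // Next block (object or non-object).
    }
    return count;
  }
#endif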

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  // Current shrinking effect: this damps shrinking when the heap gets empty.
  size_t _shrink_factor;

  size_t _min_heap_delta_bytes;   // Minimum amount to expand.

  // Some statistics from before gc started.
  // These are gathered in the gc_prologue (and should_collect)
  // to control growing/shrinking policy in spite of promotions.
  size_t _capacity_at_prologue;
  size_t _used_at_prologue;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes".  Expand by at least
  // "expand_bytes".  Returns true if some amount (not necessarily the
  // full "bytes") was expanded.
  virtual bool expand(size_t bytes, size_t expand_bytes);

  // Shrink the generation by the specified number of bytes.
  virtual void shrink(size_t bytes) = 0;

  virtual void compute_new_size();

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a
// single contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class VM_PopulateDumpSharedSpace;

 protected:
  ContiguousSpace* _the_space;    // actual space holding objects
  WaterMark _last_gc;             // watermark between objects allocated before
                                  // and after last GC.

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation by the specified number of bytes.
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space)
  {}
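
  // Illustrative sketch (not compiled): the mark-compact phase ordering
  // this generation participates in, using the phase2/3/4 hooks declared
  // on Generation.  The marking phase (phase1) and the driver function are
  // hypothetical; only the hook names below come from this interface.
#if 0
  void mark_sweep_compact_example(Generation* gen, CompactPoint* cp) {
    // Phase 1 (not shown): mark all live objects.
    gen->prepare_for_compaction(cp);  // Phase 2: compute new addresses.
    gen->adjust_pointers();           // Phase 3: fix up references.
    gen->compact();                   // Phase 4: slide objects into place.
  }
#endif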

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify();
  virtual void print_on(outputStream* st) const;
};

#endif // SHARE_VM_MEMORY_GENERATION_HPP