src/share/vm/memory/generation.hpp

author:      ysr
date:        Wed, 23 Dec 2009 09:23:54 -0800
changeset:   1580:e018e6884bd8
parent:      1014:0fbdb4381b99
child:       1907:c18cbe5936b8
permissions: -rw-r--r--

6631166: CMS: better heuristics when combatting fragmentation
Summary: Autonomic per-worker free block cache sizing, tunable coalition policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking.
Reviewed-by: jmasa
/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                        - abstract base class
// - DefNewGeneration                - allocation area (copy collected)
//   - ParNewGeneration              - a DefNewGeneration that is collected by
//                                     several threads
// - CardGeneration                  - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration  - abstract class holding a single
//                                     contiguous space with card marking
//     - TenuredGeneration           - tenured (old object) space (markSweepCompact)
//     - CompactingPermGenGen        - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                       (Detlefs-Printezis refinement of
//                                        Boehm-Demers-Shenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};


class Generation: public CHeapObj {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;      // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    LogOfGenGrain = 16,
    GenGrain = 1 << LogOfGenGrain
  };
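
  // Added commentary (not in the original header), illustrating the
  // alignment these constants imply: with LogOfGenGrain = 16, GenGrain is
  // 64K, so generation boundaries and sizes are 64K-aligned.  A sketch,
  // assuming the align_size_up helper from globalDefinitions.hpp:
  //
  //   size_t byte_size = align_size_up(requested_bytes, (size_t)Generation::GenGrain);
  //   // e.g. requested_bytes = 100000 ==> byte_size = 131072 (2 * 64K)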

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes it is called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount can
  // be attempted safely (without a vm failure).
  // Promotion of the full amount is not guaranteed but
  // can be attempted.
  // "younger_handles_promotion_failure" is true if the younger
  // generation handles a promotion failure.
  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into an allocated object in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Checks that the generation kind is DefNewGeneration or a subclass of
  // DefNewGeneration and returns a DefNewGeneration*.
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }
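
  // Added commentary (not in the original header): the overflow check above
  // guards the later conversion of "word_size" to a byte count.  On a
  // 64-bit VM, BitsPerSize_t = 64 and LogHeapWordSize = 3, so
  // overflow_limit = 2^61 words; any word_size at or above that would wrap
  // around when scaled to bytes (word_size << LogHeapWordSize).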

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.
  // The caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
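
  // Added commentary (not in the original header): a sketch of the
  // lock-free fast path these exported fields make possible (the real fast
  // path is emitted inline by the interpreter and compilers); assumes the
  // Atomic::cmpxchg_ptr(exchange, dest, compare) helper:
  //
  //   HeapWord** top = gen->top_addr();
  //   HeapWord** end = gen->end_addr();
  //   HeapWord* old_top;
  //   HeapWord* new_top;
  //   do {
  //     old_top = *top;
  //     new_top = old_top + word_size;
  //     if (new_top > *end) return NULL;   // no room; take the slow path
  //   } while (Atomic::cmpxchg_ptr(new_top, top, old_top) != old_top);
  //   return old_top;                      // start of the new block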

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);
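
  // Usage sketch (added commentary, not part of the original header); the
  // failure-handling call is hypothetical, standing in for whatever
  // recovery path the caller implements:
  //
  //   size_t sz = obj->size();                    // compute once, pass along
  //   oop new_obj = next_gen()->promote(obj, sz);
  //   if (new_obj == NULL) {
  //     handle_promotion_failure(obj);            // hypothetical recovery hook
  //   }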

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // XXX See note in genCollectedHeap::millis_since_last_gc()
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: " INT64_FORMAT " to " INT64_FORMAT,
                _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() { ShouldNotReachHere(); }
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
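
  // Added commentary (not in the original header): for a closure type such
  // as ScanClosure (declared above) with nv_suffix "_nv", the macro expands
  // to
  //
  //   virtual void oop_since_save_marks_iterate_nv(ScanClosure* cl) {
  //     oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);
  //   }
  //
  // Subclasses override these with statically-bound loops over their own
  // spaces, avoiding a virtual call per oop visited.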

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion, say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
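
  // Usage sketch (added commentary, not part of the original header): a
  // requestor gathers contributions and then walks the resulting list; per
  // the ScratchBlock definition, each block spans the num_words words
  // starting at the block's own address:
  //
  //   ScratchBlock* list = NULL;
  //   older_gen->contribute_scratch(list, this, max_alloc_words);
  //   for (ScratchBlock* b = list; b != NULL; b = b->next) {
  //     // up to b->num_words words starting at (HeapWord*)b are usable
  //   }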
duke@435 487
jmasa@698 488 // Give each generation an opportunity to do clean up for any
jmasa@698 489 // contributed scratch.
jmasa@698 490 virtual void reset_scratch() {};
jmasa@698 491
duke@435 492 // When an older generation has been collected, and perhaps resized,
duke@435 493 // this method will be invoked on all younger generations (from older to
duke@435 494 // younger), allowing them to resize themselves as appropriate.
duke@435 495 virtual void compute_new_size() = 0;
duke@435 496
duke@435 497 // Printing
duke@435 498 virtual const char* name() const = 0;
duke@435 499 virtual const char* short_name() const = 0;
duke@435 500
duke@435 501 int level() const { return _level; }
duke@435 502
duke@435 503 // Attributes
duke@435 504
duke@435 505 // True iff the given generation may only be the youngest generation.
duke@435 506 virtual bool must_be_youngest() const = 0;
duke@435 507 // True iff the given generation may only be the oldest generation.
duke@435 508 virtual bool must_be_oldest() const = 0;
duke@435 509
duke@435 510 // Reference Processing accessor
duke@435 511 ReferenceProcessor* const ref_processor() { return _ref_processor; }
duke@435 512
duke@435 513 // Iteration.
duke@435 514
duke@435 515 // Iterate over all the ref-containing fields of all objects in the
duke@435 516 // generation, calling "cl.do_oop" on each.
duke@435 517 virtual void oop_iterate(OopClosure* cl);
duke@435 518
duke@435 519 // Same as above, restricted to the intersection of a memory region and
duke@435 520 // the generation.
duke@435 521 virtual void oop_iterate(MemRegion mr, OopClosure* cl);
duke@435 522
duke@435 523 // Iterate over all objects in the generation, calling "cl.do_object" on
duke@435 524 // each.
duke@435 525 virtual void object_iterate(ObjectClosure* cl);
duke@435 526
jmasa@952 527 // Iterate over all safe objects in the generation, calling "cl.do_object" on
jmasa@952 528 // each. An object is safe if its references point to other objects in
jmasa@952 529 // the heap. This defaults to object_iterate() unless overridden.
jmasa@952 530 virtual void safe_object_iterate(ObjectClosure* cl);
jmasa@952 531
duke@435 532 // Iterate over all objects allocated in the generation since the last
duke@435 533 // collection, calling "cl.do_object" on each. The generation must have
duke@435 534 // been initialized properly to support this function, or else this call
duke@435 535 // will fail.
duke@435 536 virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
duke@435 537
duke@435 538 // Apply "cl->do_oop" to (the address of) all and only all the ref fields
duke@435 539 // in the current generation that contain pointers to objects in younger
duke@435 540 // generations. Objects allocated since the last "save_marks" call are
duke@435 541 // excluded.
duke@435 542 virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
duke@435 543
duke@435 544 // Inform a generation that it longer contains references to objects
duke@435 545 // in any younger generation. [e.g. Because younger gens are empty,
duke@435 546 // clear the card table.]
duke@435 547 virtual void clear_remembered_set() { }
duke@435 548
duke@435 549 // Inform a generation that some of its objects have moved. [e.g. The
duke@435 550 // generation's spaces were compacted, invalidating the card table.]
duke@435 551 virtual void invalidate_remembered_set() { }
duke@435 552
duke@435 553 // Block abstraction.
duke@435 554
duke@435 555 // Returns the address of the start of the "block" that contains the
duke@435 556 // address "addr". We say "blocks" instead of "object" since some heaps
duke@435 557 // may not pack objects densely; a chunk may either be an object or a
duke@435 558 // non-object.
duke@435 559 virtual HeapWord* block_start(const void* addr) const;
duke@435 560
duke@435 561 // Requires "addr" to be the start of a chunk, and returns its size.
duke@435 562 // "addr + size" is required to be the start of a new chunk, or the end
duke@435 563 // of the active area of the heap.
duke@435 564 virtual size_t block_size(const HeapWord* addr) const ;
duke@435 565
duke@435 566 // Requires "addr" to be the start of a block, and returns "TRUE" iff
duke@435 567 // the block is an object.
duke@435 568 virtual bool block_is_obj(const HeapWord* addr) const;
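
  // Usage sketch (added commentary, not part of the original header): the
  // block protocol supports a linear walk over a parsable region, visiting
  // only those blocks that are objects:
  //
  //   HeapWord* p   = gen->used_region().start();
  //   HeapWord* end = gen->used_region().end();
  //   while (p < end) {
  //     if (gen->block_is_obj(p)) {
  //       cl->do_object(oop(p));      // "cl" is some ObjectClosure
  //     }
  //     p += gen->block_size(p);
  //   }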


  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify(bool allow_dirty) = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes".  Expand by at least
  // "expand_bytes".  Return true if some amount (not necessarily the
  // full "bytes") of expansion was done.
  virtual bool expand(size_t bytes, size_t expand_bytes);

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
  ContiguousSpace* _the_space;    // actual space holding objects
  WaterMark _last_gc;             // watermark between objects allocated before
                                  // and after last GC.

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation with specified size (returns false if unable to shrink)
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _min_heap_delta_bytes(min_heap_delta_bytes), _the_space(space)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify(bool allow_dirty);
  virtual void print_on(outputStream* st) const;
};
