src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      johnc
date:        Mon, 03 Aug 2009 12:59:30 -0700
changeset:   1324:15c5903cf9e1
parent:      1246:830ca2573896
child:       1377:2c79770d1f6e
permissions: -rw-r--r--

6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
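
The chunk-claiming pattern described in the summary can be sketched as follows. This is an illustrative sketch only, not the code in this changeset: the names (drain_hot_card_cache_chunks, _par_chunk_size, _par_claimed_idx, _n_cached_cards, _cache, process_card) are placeholders, and the real per-card processing is done elsewhere in the collector.

    // Each GC worker thread runs this loop. Chunks are handed out by
    // atomically bumping a shared claim index, so no two workers ever
    // process the same cached card.
    void drain_hot_card_cache_chunks() {
      const int chunk_size = _par_chunk_size;    // fixed at VM initialization, e.g.
                                                 // MAX2((int)(_n_cached_cards / n_workers), 1)
      while (true) {
        // Claim the half-open range [start, end) of the cached-card array.
        int start = Atomic::add(chunk_size, &_par_claimed_idx) - chunk_size;
        if (start >= _n_cached_cards) return;    // every chunk has been claimed
        int end = MIN2(start + chunk_size, _n_cached_cards);
        for (int i = start; i < end; i++) {
          process_card(_cache[i]);               // placeholder for the per-card work
        }
      }
    }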

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #ifndef SERIALGC
    27 // A HeapRegion is the smallest piece of a G1CollectedHeap that
    28 // can be collected independently.
    30 // NOTE: Although a HeapRegion is a Space, its
    31 // Space::initDirtyCardClosure method must not be called.
    32 // The problem is that the existence of this method breaks
    33 // the independence of barrier sets from remembered sets.
    34 // The solution is to remove this method from the definition
    35 // of a Space.
    37 class CompactibleSpace;
    38 class ContiguousSpace;
    39 class HeapRegionRemSet;
    40 class HeapRegionRemSetIterator;
    41 class HeapRegion;
    43 // A dirty card to oop closure for heap regions. It
    44 // knows how to get the G1 heap and how to use the bitmap
    45 // in the concurrent marker used by G1 to filter remembered
    46 // sets.
    48 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
    49 public:
    50   // Specification of possible DirtyCardToOopClosure filtering.
    51   enum FilterKind {
    52     NoFilterKind,
    53     IntoCSFilterKind,
    54     OutOfRegionFilterKind
    55   };
    57 protected:
    58   HeapRegion* _hr;
    59   FilterKind _fk;
    60   G1CollectedHeap* _g1;
    62   void walk_mem_region_with_cl(MemRegion mr,
    63                                HeapWord* bottom, HeapWord* top,
    64                                OopClosure* cl);
    66   // We don't specialize this for FilteringClosure; filtering is handled by
    67   // the "FilterKind" mechanism.  But we provide this to avoid a compiler
    68   // warning.
    69   void walk_mem_region_with_cl(MemRegion mr,
    70                                HeapWord* bottom, HeapWord* top,
    71                                FilteringClosure* cl) {
    72     HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
    73                                                        (OopClosure*)cl);
    74   }
    76   // Get the actual top of the area on which the closure will
    77   // operate, given where the top is assumed to be (the end of the
    78   // memory region passed to do_MemRegion) and where the object
    79   // at the top is assumed to start. For example, an object may
    80   // start at the top but actually extend past the assumed top,
    81   // in which case the top becomes the end of the object.
    82   HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    83     return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
    84   }
    86   // Walk the given memory region from bottom to (actual) top
    87   // looking for objects and applying the oop closure (_cl) to
    88   // them. The base implementation of this treats the area as
    89   // blocks, where a block may or may not be an object. Sub-
    90   // classes should override this to provide more accurate
    91   // or possibly more efficient walking.
    92   void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    93     Filtering_DCTOC::walk_mem_region(mr, bottom, top);
    94   }
    96 public:
    97   HeapRegionDCTOC(G1CollectedHeap* g1,
    98                   HeapRegion* hr, OopClosure* cl,
    99                   CardTableModRefBS::PrecisionStyle precision,
   100                   FilterKind fk);
   101 };
   104 // The complicating factor is that BlockOffsetTable diverged
   105 // significantly, and we need functionality that is only in the G1 version.
   106 // So I copied that code, which led to an alternate G1 version of
   107 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
   108 // be reconciled, then G1OffsetTableContigSpace could go away.
   110 // The idea behind time stamps is the following. Doing a save_marks on
    111 // all regions at every GC pause is time-consuming (if I remember
    112 // correctly, 10ms or so). So, we would like to do that only for regions
   113 // that are GC alloc regions. To achieve this, we use time
   114 // stamps. For every evacuation pause, G1CollectedHeap generates a
   115 // unique time stamp (essentially a counter that gets
   116 // incremented). Every time we want to call save_marks on a region,
   117 // we set the saved_mark_word to top and also copy the current GC
   118 // time stamp to the time stamp field of the space. Reading the
   119 // saved_mark_word involves checking the time stamp of the
   120 // region. If it is the same as the current GC time stamp, then we
   121 // can safely read the saved_mark_word field, as it is valid. If the
   122 // time stamp of the region is not the same as the current GC time
   123 // stamp, then we instead read top, as the saved_mark_word field is
   124 // invalid. Time stamps (on the regions and also on the
   125 // G1CollectedHeap) are reset at every cleanup (we iterate over
   126 // the regions anyway) and at the end of a Full GC. The current scheme
    127 // that uses sequential unsigned ints will fail only if we have ~4 billion
   128 // evacuation pauses between two cleanups, which is _highly_ unlikely.
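// For illustration, the read path described above amounts to something like
// the following (a sketch only; the actual implementation lives in
// heapRegion.cpp and may differ in detail):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
//       return top();                              // stamp is stale: saved mark invalid
//     } else {
//       return ContiguousSpace::saved_mark_word(); // stamp is current: field valid
//     }
//   }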
   130 class G1OffsetTableContigSpace: public ContiguousSpace {
   131   friend class VMStructs;
   132  protected:
   133   G1BlockOffsetArrayContigSpace _offsets;
   134   Mutex _par_alloc_lock;
   135   volatile unsigned _gc_time_stamp;
   137  public:
   138   // Constructor.  If "is_zeroed" is true, the MemRegion "mr" may be
   139   // assumed to contain zeros.
   140   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
   141                            MemRegion mr, bool is_zeroed = false);
   143   void set_bottom(HeapWord* value);
   144   void set_end(HeapWord* value);
   146   virtual HeapWord* saved_mark_word() const;
   147   virtual void set_saved_mark();
   148   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   150   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   151   virtual void clear(bool mangle_space);
   153   HeapWord* block_start(const void* p);
   154   HeapWord* block_start_const(const void* p) const;
   156   // Add offset table update.
   157   virtual HeapWord* allocate(size_t word_size);
   158   HeapWord* par_allocate(size_t word_size);
    160   // MarkSweep support, phase 3.
   161   virtual HeapWord* initialize_threshold();
   162   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
   164   virtual void print() const;
   165 };
   167 class HeapRegion: public G1OffsetTableContigSpace {
   168   friend class VMStructs;
   169  private:
   171   enum HumongousType {
   172     NotHumongous = 0,
   173     StartsHumongous,
   174     ContinuesHumongous
   175   };
   177   // The next filter kind that should be used for a "new_dcto_cl" call with
   178   // the "traditional" signature.
   179   HeapRegionDCTOC::FilterKind _next_fk;
   181   // Requires that the region "mr" be dense with objects, and begin and end
   182   // with an object.
   183   void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
   185   // The remembered set for this region.
   186   // (Might want to make this "inline" later, to avoid some alloc failure
   187   // issues.)
   188   HeapRegionRemSet* _rem_set;
   190   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
   192  protected:
   193   // If this region is a member of a HeapRegionSeq, the index in that
   194   // sequence, otherwise -1.
   195   int  _hrs_index;
   197   HumongousType _humongous_type;
   198   // For a humongous region, region in which it starts.
   199   HeapRegion* _humongous_start_region;
    200   // For the start region of a humongous sequence, its original end().
   201   HeapWord* _orig_end;
   203   // True iff the region is in current collection_set.
   204   bool _in_collection_set;
    206   // True iff the region is on the unclean list, waiting to be zero filled.
   207   bool _is_on_unclean_list;
   209   // True iff the region is on the free list, ready for allocation.
   210   bool _is_on_free_list;
    212   // True iff this is or has been an allocation region in the current
    213   // collection pause.
   214   bool _is_gc_alloc_region;
   216   // True iff an attempt to evacuate an object in the region failed.
   217   bool _evacuation_failed;
    219   // A heap region may be a member of one of a number of special subsets, each
   220   // represented as linked lists through the field below.  Currently, these
   221   // sets include:
   222   //   The collection set.
   223   //   The set of allocation regions used in a collection pause.
   224   //   Spaces that may contain gray objects.
   225   HeapRegion* _next_in_special_set;
   227   // next region in the young "generation" region set
   228   HeapRegion* _next_young_region;
   230   // Next region whose cards need cleaning
   231   HeapRegion* _next_dirty_cards_region;
   233   // For parallel heapRegion traversal.
   234   jint _claimed;
   236   // We use concurrent marking to determine the amount of live data
   237   // in each heap region.
   238   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
   239   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
    241   // See "sort_index" method.  -1 means it is not in the array.
   242   int _sort_index;
   244   // <PREDICTION>
   245   double _gc_efficiency;
   246   // </PREDICTION>
   248   enum YoungType {
   249     NotYoung,                   // a region is not young
   250     ScanOnly,                   // a region is young and scan-only
   251     Young,                      // a region is young
    252     Survivor                    // a region is young and contains
    253                                 // survivors
   254   };
   256   YoungType _young_type;
   257   int  _young_index_in_cset;
   258   SurvRateGroup* _surv_rate_group;
   259   int  _age_index;
   261   // The start of the unmarked area. The unmarked area extends from this
   262   // word until the top and/or end of the region, and is the part
   263   // of the region for which no marking was done, i.e. objects may
   264   // have been allocated in this part since the last mark phase.
   265   // "prev" is the top at the start of the last completed marking.
    266   // "next" is the top at the start of the in-progress marking (if any).
   267   HeapWord* _prev_top_at_mark_start;
   268   HeapWord* _next_top_at_mark_start;
   269   // If a collection pause is in progress, this is the top at the start
   270   // of that pause.
   272   // We've counted the marked bytes of objects below here.
   273   HeapWord* _top_at_conc_mark_count;
   275   void init_top_at_mark_start() {
   276     assert(_prev_marked_bytes == 0 &&
   277            _next_marked_bytes == 0,
   278            "Must be called after zero_marked_bytes.");
   279     HeapWord* bot = bottom();
   280     _prev_top_at_mark_start = bot;
   281     _next_top_at_mark_start = bot;
   282     _top_at_conc_mark_count = bot;
   283   }
   285   jint _zfs;  // A member of ZeroFillState.  Protected by ZF_lock.
   286   Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
   287                         // made it so.
   289   void set_young_type(YoungType new_type) {
   290     //assert(_young_type != new_type, "setting the same type" );
   291     // TODO: add more assertions here
   292     _young_type = new_type;
   293   }
   295  public:
   296   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
   297   HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
   298              MemRegion mr, bool is_zeroed);
   300   enum SomePublicConstants {
   301     // HeapRegions are GrainBytes-aligned
   302     // and have sizes that are multiples of GrainBytes.
   303     LogOfHRGrainBytes = 20,
   304     LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize,
   305     GrainBytes = 1 << LogOfHRGrainBytes,
    306     GrainWords = 1 << LogOfHRGrainWords,
   307     MaxAge = 2, NoOfAges = MaxAge+1
   308   };
   310   enum ClaimValues {
   311     InitialClaimValue     = 0,
   312     FinalCountClaimValue  = 1,
   313     NoteEndClaimValue     = 2,
   314     ScrubRemSetClaimValue = 3,
   315     ParVerifyClaimValue   = 4,
   316     RebuildRSClaimValue   = 5
   317   };
   319   // Concurrent refinement requires contiguous heap regions (in which TLABs
   320   // might be allocated) to be zero-filled.  Each region therefore has a
   321   // zero-fill-state.
   322   enum ZeroFillState {
   323     NotZeroFilled,
   324     ZeroFilling,
   325     ZeroFilled,
   326     Allocated
   327   };
   329   // If this region is a member of a HeapRegionSeq, the index in that
   330   // sequence, otherwise -1.
   331   int hrs_index() const { return _hrs_index; }
   332   void set_hrs_index(int index) { _hrs_index = index; }
   334   // The number of bytes marked live in the region in the last marking phase.
   335   size_t marked_bytes()    { return _prev_marked_bytes; }
   336   // The number of bytes counted in the next marking.
   337   size_t next_marked_bytes() { return _next_marked_bytes; }
    338   // The number of bytes live with respect to the next marking.
   339   size_t next_live_bytes() {
   340     return (top() - next_top_at_mark_start())
   341       * HeapWordSize
   342       + next_marked_bytes();
   343   }
    345   // A lower bound on the number of garbage bytes in the region.
   346   size_t garbage_bytes() {
   347     size_t used_at_mark_start_bytes =
   348       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
   349     assert(used_at_mark_start_bytes >= marked_bytes(),
   350            "Can't mark more than we have.");
   351     return used_at_mark_start_bytes - marked_bytes();
   352   }
   354   // An upper bound on the number of live bytes in the region.
   355   size_t max_live_bytes() { return used() - garbage_bytes(); }
   357   void add_to_marked_bytes(size_t incr_bytes) {
   358     _next_marked_bytes = _next_marked_bytes + incr_bytes;
   359     guarantee( _next_marked_bytes <= used(), "invariant" );
   360   }
   362   void zero_marked_bytes()      {
   363     _prev_marked_bytes = _next_marked_bytes = 0;
   364   }
   366   bool isHumongous() const { return _humongous_type != NotHumongous; }
   367   bool startsHumongous() const { return _humongous_type == StartsHumongous; }
   368   bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
   369   // For a humongous region, region in which it starts.
   370   HeapRegion* humongous_start_region() const {
   371     return _humongous_start_region;
   372   }
    374   // Causes the current region to represent the start of a humongous
    375   // object that may span multiple regions.
   376   virtual void set_startsHumongous();
   378   // The regions that continue a humongous sequence should be added using
   379   // this method, in increasing address order.
   380   void set_continuesHumongous(HeapRegion* start);
   382   void add_continuingHumongousRegion(HeapRegion* cont);
   384   // If the region has a remembered set, return a pointer to it.
   385   HeapRegionRemSet* rem_set() const {
   386     return _rem_set;
   387   }
   389   // True iff the region is in current collection_set.
   390   bool in_collection_set() const {
   391     return _in_collection_set;
   392   }
   393   void set_in_collection_set(bool b) {
   394     _in_collection_set = b;
   395   }
   396   HeapRegion* next_in_collection_set() {
   397     assert(in_collection_set(), "should only invoke on member of CS.");
   398     assert(_next_in_special_set == NULL ||
   399            _next_in_special_set->in_collection_set(),
   400            "Malformed CS.");
   401     return _next_in_special_set;
   402   }
   403   void set_next_in_collection_set(HeapRegion* r) {
   404     assert(in_collection_set(), "should only invoke on member of CS.");
   405     assert(r == NULL || r->in_collection_set(), "Malformed CS.");
   406     _next_in_special_set = r;
   407   }
   409   // True iff it is or has been an allocation region in the current
   410   // collection pause.
   411   bool is_gc_alloc_region() const {
   412     return _is_gc_alloc_region;
   413   }
   414   void set_is_gc_alloc_region(bool b) {
   415     _is_gc_alloc_region = b;
   416   }
   417   HeapRegion* next_gc_alloc_region() {
   418     assert(is_gc_alloc_region(), "should only invoke on member of CS.");
   419     assert(_next_in_special_set == NULL ||
   420            _next_in_special_set->is_gc_alloc_region(),
   421            "Malformed CS.");
   422     return _next_in_special_set;
   423   }
   424   void set_next_gc_alloc_region(HeapRegion* r) {
   425     assert(is_gc_alloc_region(), "should only invoke on member of CS.");
   426     assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
   427     _next_in_special_set = r;
   428   }
   430   bool is_on_free_list() {
   431     return _is_on_free_list;
   432   }
   434   void set_on_free_list(bool b) {
   435     _is_on_free_list = b;
   436   }
   438   HeapRegion* next_from_free_list() {
   439     assert(is_on_free_list(),
   440            "Should only invoke on free space.");
   441     assert(_next_in_special_set == NULL ||
   442            _next_in_special_set->is_on_free_list(),
   443            "Malformed Free List.");
   444     return _next_in_special_set;
   445   }
   447   void set_next_on_free_list(HeapRegion* r) {
   448     assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
   449     _next_in_special_set = r;
   450   }
   452   bool is_on_unclean_list() {
   453     return _is_on_unclean_list;
   454   }
   456   void set_on_unclean_list(bool b);
   458   HeapRegion* next_from_unclean_list() {
   459     assert(is_on_unclean_list(),
   460            "Should only invoke on unclean space.");
   461     assert(_next_in_special_set == NULL ||
   462            _next_in_special_set->is_on_unclean_list(),
   463            "Malformed unclean List.");
   464     return _next_in_special_set;
   465   }
   467   void set_next_on_unclean_list(HeapRegion* r);
   469   HeapRegion* get_next_young_region() { return _next_young_region; }
   470   void set_next_young_region(HeapRegion* hr) {
   471     _next_young_region = hr;
   472   }
   474   HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
   475   HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
   476   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   477   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
    479   // Allows logical separation between objects allocated before and after this call.
   480   void save_marks();
   482   // Reset HR stuff to default values.
   483   void hr_clear(bool par, bool clear_space);
   485   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   487   // Ensure that "this" is zero-filled.
   488   void ensure_zero_filled();
   489   // This one requires that the calling thread holds ZF_mon.
   490   void ensure_zero_filled_locked();
   492   // Get the start of the unmarked area in this region.
   493   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   494   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
   496   // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
    497   // allocated in the current region before the last call to "save_marks".
   498   void oop_before_save_marks_iterate(OopClosure* cl);
   500   // This call determines the "filter kind" argument that will be used for
   501   // the next call to "new_dcto_cl" on this region with the "traditional"
   502   // signature (i.e., the call below.)  The default, in the absence of a
   503   // preceding call to this method, is "NoFilterKind", and a call to this
   504   // method is necessary for each such call, or else it reverts to the
   505   // default.
   506   // (This is really ugly, but all other methods I could think of changed a
   507   // lot of main-line code for G1.)
   508   void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
   509     _next_fk = nfk;
   510   }
   512   DirtyCardToOopClosure*
   513   new_dcto_closure(OopClosure* cl,
   514                    CardTableModRefBS::PrecisionStyle precision,
   515                    HeapRegionDCTOC::FilterKind fk);
   517 #if WHASSUP
   518   DirtyCardToOopClosure*
   519   new_dcto_closure(OopClosure* cl,
   520                    CardTableModRefBS::PrecisionStyle precision,
   521                    HeapWord* boundary) {
   522     assert(boundary == NULL, "This arg doesn't make sense here.");
   523     DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
   524     _next_fk = HeapRegionDCTOC::NoFilterKind;
   525     return res;
   526   }
   527 #endif
   529   //
   530   // Note the start or end of marking. This tells the heap region
   531   // that the collector is about to start or has finished (concurrently)
   532   // marking the heap.
   533   //
   535   // Note the start of a marking phase. Record the
   536   // start of the unmarked area of the region here.
   537   void note_start_of_marking(bool during_initial_mark) {
   538     init_top_at_conc_mark_count();
   539     _next_marked_bytes = 0;
   540     if (during_initial_mark && is_young() && !is_survivor())
   541       _next_top_at_mark_start = bottom();
   542     else
   543       _next_top_at_mark_start = top();
   544   }
   546   // Note the end of a marking phase. Install the start of
   547   // the unmarked area that was captured at start of marking.
   548   void note_end_of_marking() {
   549     _prev_top_at_mark_start = _next_top_at_mark_start;
   550     _prev_marked_bytes = _next_marked_bytes;
   551     _next_marked_bytes = 0;
   553     guarantee(_prev_marked_bytes <=
   554               (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
   555               "invariant");
   556   }
   558   // After an evacuation, we need to update _next_top_at_mark_start
   559   // to be the current top.  Note this is only valid if we have only
   560   // ever evacuated into this region.  If we evacuate, allocate, and
   561   // then evacuate we are in deep doodoo.
   562   void note_end_of_copying() {
   563     assert(top() >= _next_top_at_mark_start,
   564            "Increase only");
   565     // Survivor regions will be scanned on the start of concurrent
   566     // marking.
   567     if (!is_survivor()) {
   568       _next_top_at_mark_start = top();
   569     }
   570   }
   572   // Returns "false" iff no object in the region was allocated when the
   573   // last mark phase ended.
   574   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
   576   // If "is_marked()" is true, then this is the index of the region in
   577   // an array constructed at the end of marking of the regions in a
   578   // "desirability" order.
   579   int sort_index() {
   580     return _sort_index;
   581   }
   582   void set_sort_index(int i) {
   583     _sort_index = i;
   584   }
   586   void init_top_at_conc_mark_count() {
   587     _top_at_conc_mark_count = bottom();
   588   }
   590   void set_top_at_conc_mark_count(HeapWord *cur) {
   591     assert(bottom() <= cur && cur <= end(), "Sanity.");
   592     _top_at_conc_mark_count = cur;
   593   }
   595   HeapWord* top_at_conc_mark_count() {
   596     return _top_at_conc_mark_count;
   597   }
   599   void reset_during_compaction() {
   600     guarantee( isHumongous() && startsHumongous(),
   601                "should only be called for humongous regions");
   603     zero_marked_bytes();
   604     init_top_at_mark_start();
   605   }
   607   // <PREDICTION>
   608   void calc_gc_efficiency(void);
   609   double gc_efficiency() { return _gc_efficiency;}
   610   // </PREDICTION>
   612   bool is_young() const     { return _young_type != NotYoung; }
   613   bool is_scan_only() const { return _young_type == ScanOnly; }
   614   bool is_survivor() const  { return _young_type == Survivor; }
   616   int  young_index_in_cset() const { return _young_index_in_cset; }
   617   void set_young_index_in_cset(int index) {
   618     assert( (index == -1) || is_young(), "pre-condition" );
   619     _young_index_in_cset = index;
   620   }
   622   int age_in_surv_rate_group() {
   623     assert( _surv_rate_group != NULL, "pre-condition" );
   624     assert( _age_index > -1, "pre-condition" );
   625     return _surv_rate_group->age_in_group(_age_index);
   626   }
   628   void recalculate_age_in_surv_rate_group() {
   629     assert( _surv_rate_group != NULL, "pre-condition" );
   630     assert( _age_index > -1, "pre-condition" );
   631     _age_index = _surv_rate_group->recalculate_age_index(_age_index);
   632   }
   634   void record_surv_words_in_group(size_t words_survived) {
   635     assert( _surv_rate_group != NULL, "pre-condition" );
   636     assert( _age_index > -1, "pre-condition" );
   637     int age_in_group = age_in_surv_rate_group();
   638     _surv_rate_group->record_surviving_words(age_in_group, words_survived);
   639   }
   641   int age_in_surv_rate_group_cond() {
   642     if (_surv_rate_group != NULL)
   643       return age_in_surv_rate_group();
   644     else
   645       return -1;
   646   }
   648   SurvRateGroup* surv_rate_group() {
   649     return _surv_rate_group;
   650   }
   652   void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
   653     assert( surv_rate_group != NULL, "pre-condition" );
   654     assert( _surv_rate_group == NULL, "pre-condition" );
   655     assert( is_young(), "pre-condition" );
   657     _surv_rate_group = surv_rate_group;
   658     _age_index = surv_rate_group->next_age_index();
   659   }
   661   void uninstall_surv_rate_group() {
   662     if (_surv_rate_group != NULL) {
   663       assert( _age_index > -1, "pre-condition" );
   664       assert( is_young(), "pre-condition" );
   666       _surv_rate_group = NULL;
   667       _age_index = -1;
   668     } else {
   669       assert( _age_index == -1, "pre-condition" );
   670     }
   671   }
   673   void set_young() { set_young_type(Young); }
   675   void set_scan_only() { set_young_type(ScanOnly); }
   677   void set_survivor() { set_young_type(Survivor); }
   679   void set_not_young() { set_young_type(NotYoung); }
   681   // Determine if an object has been allocated since the last
   682   // mark performed by the collector. This returns true iff the object
   683   // is within the unmarked area of the region.
   684   bool obj_allocated_since_prev_marking(oop obj) const {
   685     return (HeapWord *) obj >= prev_top_at_mark_start();
   686   }
   687   bool obj_allocated_since_next_marking(oop obj) const {
   688     return (HeapWord *) obj >= next_top_at_mark_start();
   689   }
   691   // For parallel heapRegion traversal.
   692   bool claimHeapRegion(int claimValue);
   693   jint claim_value() { return _claimed; }
   694   // Use this carefully: only when you're sure no one is claiming...
   695   void set_claim_value(int claimValue) { _claimed = claimValue; }
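  // For illustration only: claiming boils down to a compare-and-swap on
  // _claimed (the real body is in heapRegion.cpp and may differ in detail):
  //
  //   bool HeapRegion::claimHeapRegion(jint claimValue) {
  //     jint current = _claimed;
  //     if (current != claimValue &&
  //         Atomic::cmpxchg(claimValue, &_claimed, current) == current) {
  //       return true;
  //     }
  //     return false;
  //   }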
   697   // Returns the "evacuation_failed" property of the region.
   698   bool evacuation_failed() { return _evacuation_failed; }
   700   // Sets the "evacuation_failed" property of the region.
   701   void set_evacuation_failed(bool b) {
   702     _evacuation_failed = b;
   704     if (b) {
   705       init_top_at_conc_mark_count();
   706       _next_marked_bytes = 0;
   707     }
   708   }
   710   // Requires that "mr" be entirely within the region.
   711   // Apply "cl->do_object" to all objects that intersect with "mr".
   712   // If the iteration encounters an unparseable portion of the region,
   713   // or if "cl->abort()" is true after a closure application,
   714   // terminate the iteration and return the address of the start of the
   715   // subregion that isn't done.  (The two can be distinguished by querying
   716   // "cl->abort()".)  Return of "NULL" indicates that the iteration
   717   // completed.
   718   HeapWord*
   719   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
   721   HeapWord*
   722   oops_on_card_seq_iterate_careful(MemRegion mr,
   723                                    FilterOutOfRegionClosure* cl);
   725   // The region "mr" is entirely in "this", and starts and ends at block
   726   // boundaries. The caller declares that all the contained blocks are
   727   // coalesced into one.
   728   void declare_filled_region_to_BOT(MemRegion mr) {
   729     _offsets.single_block(mr.start(), mr.end());
   730   }
   732   // A version of block start that is guaranteed to find *some* block
    733   // boundary at or before "p", but does not do object iteration, and may
   734   // therefore be used safely when the heap is unparseable.
   735   HeapWord* block_start_careful(const void* p) const {
   736     return _offsets.block_start_careful(p);
   737   }
   739   // Requires that "addr" is within the region.  Returns the start of the
   740   // first ("careful") block that starts at or after "addr", or else the
   741   // "end" of the region if there is no such block.
   742   HeapWord* next_block_start_careful(HeapWord* addr);
   744   // Returns the zero-fill-state of the current region.
   745   ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
   746   bool zero_fill_is_allocated() { return _zfs == Allocated; }
   747   Thread* zero_filler() { return _zero_filler; }
   749   // Indicate that the contents of the region are unknown, and therefore
   750   // might require zero-filling.
   751   void set_zero_fill_needed() {
   752     set_zero_fill_state_work(NotZeroFilled);
   753   }
   754   void set_zero_fill_in_progress(Thread* t) {
   755     set_zero_fill_state_work(ZeroFilling);
   756     _zero_filler = t;
   757   }
   758   void set_zero_fill_complete();
   759   void set_zero_fill_allocated() {
   760     set_zero_fill_state_work(Allocated);
   761   }
   763   void set_zero_fill_state_work(ZeroFillState zfs);
   765   // This is called when a full collection shrinks the heap.
    766   // We want to set the heap region's state to a value which says
    767   // it is no longer part of the heap.  For now, we'll let "NotZeroFilled"
    768   // fill that role.
   769   void reset_zero_fill() {
   770     set_zero_fill_state_work(NotZeroFilled);
   771     _zero_filler = NULL;
   772   }
   774 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
   775   virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   776   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
   778   CompactibleSpace* next_compaction_space() const;
   780   virtual void reset_after_compaction();
   782   void print() const;
   783   void print_on(outputStream* st) const;
   785   // use_prev_marking == true  -> use "prev" marking information,
   786   // use_prev_marking == false -> use "next" marking information
   787   // NOTE: Only the "prev" marking information is guaranteed to be
   788   // consistent most of the time, so most calls to this should use
   789   // use_prev_marking == true. Currently, there is only one case where
   790   // this is called with use_prev_marking == false, which is to verify
   791   // the "next" marking information at the end of remark.
   792   void verify(bool allow_dirty, bool use_prev_marking) const;
   794   // Override; it uses the "prev" marking information
   795   virtual void verify(bool allow_dirty) const;
   797 #ifdef DEBUG
   798   HeapWord* allocate(size_t size);
   799 #endif
   800 };
   802 // HeapRegionClosure is used for iterating over regions.
   803 // Terminates the iteration when the "doHeapRegion" method returns "true".
   804 class HeapRegionClosure : public StackObj {
   805   friend class HeapRegionSeq;
   806   friend class G1CollectedHeap;
   808   bool _complete;
   809   void incomplete() { _complete = false; }
   811  public:
   812   HeapRegionClosure(): _complete(true) {}
   814   // Typically called on each region until it returns true.
   815   virtual bool doHeapRegion(HeapRegion* r) = 0;
   817   // True after iteration if the closure was applied to all heap regions
   818   // and returned "false" in all cases.
   819   bool complete() { return _complete; }
   820 };
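// A hypothetical example of such a closure (not part of this file): count the
// regions currently in the collection set. doHeapRegion returns false, so the
// iteration always runs to completion.
//
//   class CountCSRegionsClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountCSRegionsClosure() : _count(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->in_collection_set()) _count++;
//       return false;
//     }
//     size_t count() const { return _count; }
//   };
//
// Such a closure would typically be driven by a region iterator on the heap,
// e.g. G1CollectedHeap::heap_region_iterate(&cl).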
    822 // A linked list of heap regions.  It leaves the "next" field
   823 // unspecified; that's up to subtypes.
   824 class RegionList VALUE_OBJ_CLASS_SPEC {
   825 protected:
   826   virtual HeapRegion* get_next(HeapRegion* chr) = 0;
   827   virtual void set_next(HeapRegion* chr,
   828                         HeapRegion* new_next) = 0;
   830   HeapRegion* _hd;
   831   HeapRegion* _tl;
   832   size_t _sz;
   834   // Protected constructor because this type is only meaningful
    835   // when the get_next/set_next functions are defined.
   836   RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
   837 public:
   838   void reset() {
   839     _hd = NULL;
   840     _tl = NULL;
   841     _sz = 0;
   842   }
   843   HeapRegion* hd() { return _hd; }
   844   HeapRegion* tl() { return _tl; }
   845   size_t sz() { return _sz; }
   846   size_t length();
   848   bool well_formed() {
   849     return
   850       ((hd() == NULL && tl() == NULL && sz() == 0)
   851        || (hd() != NULL && tl() != NULL && sz() > 0))
   852       && (sz() == length());
   853   }
   854   virtual void insert_before_head(HeapRegion* r);
   855   void prepend_list(RegionList* new_list);
   856   virtual HeapRegion* pop();
   857   void dec_sz() { _sz--; }
   858   // Requires that "r" is an element of the list, and is not the tail.
   859   void delete_after(HeapRegion* r);
   860 };
   862 class EmptyNonHRegionList: public RegionList {
   863 protected:
   864   // Protected constructor because this type is only meaningful
    865   // when the get_next/set_next functions are defined.
   866   EmptyNonHRegionList() : RegionList() {}
   868 public:
   869   void insert_before_head(HeapRegion* r) {
   870     //    assert(r->is_empty(), "Better be empty");
   871     assert(!r->isHumongous(), "Better not be humongous.");
   872     RegionList::insert_before_head(r);
   873   }
   874   void prepend_list(EmptyNonHRegionList* new_list) {
   875     //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
   876     //     "Better be empty");
   877     assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
   878            "Better not be humongous.");
   879     //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
   880     //     "Better be empty");
   881     assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
   882            "Better not be humongous.");
   883     RegionList::prepend_list(new_list);
   884   }
   885 };
   887 class UncleanRegionList: public EmptyNonHRegionList {
   888 public:
   889   HeapRegion* get_next(HeapRegion* hr) {
   890     return hr->next_from_unclean_list();
   891   }
   892   void set_next(HeapRegion* hr, HeapRegion* new_next) {
   893     hr->set_next_on_unclean_list(new_next);
   894   }
   896   UncleanRegionList() : EmptyNonHRegionList() {}
   898   void insert_before_head(HeapRegion* r) {
   899     assert(!r->is_on_free_list(),
   900            "Better not already be on free list");
   901     assert(!r->is_on_unclean_list(),
   902            "Better not already be on unclean list");
   903     r->set_zero_fill_needed();
   904     r->set_on_unclean_list(true);
   905     EmptyNonHRegionList::insert_before_head(r);
   906   }
   907   void prepend_list(UncleanRegionList* new_list) {
   908     assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
   909            "Better not already be on free list");
   910     assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
   911            "Better already be marked as on unclean list");
   912     assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
   913            "Better not already be on free list");
   914     assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
   915            "Better already be marked as on unclean list");
   916     EmptyNonHRegionList::prepend_list(new_list);
   917   }
   918   HeapRegion* pop() {
   919     HeapRegion* res = RegionList::pop();
   920     if (res != NULL) res->set_on_unclean_list(false);
   921     return res;
   922   }
   923 };
   925 // Local Variables: ***
   926 // c-indentation-style: gnu ***
   927 // End: ***
   929 #endif // SERIALGC
