src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      tonyp
date:        Sat, 16 Oct 2010 17:12:19 -0400
changeset:   2241:72a161e62cc4
parent:      2021:5cbac8938c4c
child:       2314:f95d63e2154a
permissions: -rw-r--r--

6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and humongous object allocation that can cause the concurrent refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to perform the humongous object allocation in careful steps so that the concurrent refinement threads always have a consistent view of the BOT, the region contents, and top. The fix also includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SERIALGC
    27 // A HeapRegion is the smallest piece of a G1CollectedHeap that
    28 // can be collected independently.
    30 // NOTE: Although a HeapRegion is a Space, its
    31 // Space::initDirtyCardClosure method must not be called.
    32 // The problem is that the existence of this method breaks
    33 // the independence of barrier sets from remembered sets.
    34 // The solution is to remove this method from the definition
    35 // of a Space.
    37 class CompactibleSpace;
    38 class ContiguousSpace;
    39 class HeapRegionRemSet;
    40 class HeapRegionRemSetIterator;
    41 class HeapRegion;
    43 // A dirty card to oop closure for heap regions. It
    44 // knows how to get the G1 heap and how to use the bitmap
    45 // in the concurrent marker used by G1 to filter remembered
    46 // sets.
    48 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
    49 public:
    50   // Specification of possible DirtyCardToOopClosure filtering.
    51   enum FilterKind {
    52     NoFilterKind,
    53     IntoCSFilterKind,
    54     OutOfRegionFilterKind
    55   };
    57 protected:
    58   HeapRegion* _hr;
    59   FilterKind _fk;
    60   G1CollectedHeap* _g1;
    62   void walk_mem_region_with_cl(MemRegion mr,
    63                                HeapWord* bottom, HeapWord* top,
    64                                OopClosure* cl);
    66   // We don't specialize this for FilteringClosure; filtering is handled by
    67   // the "FilterKind" mechanism.  But we provide this to avoid a compiler
    68   // warning.
    69   void walk_mem_region_with_cl(MemRegion mr,
    70                                HeapWord* bottom, HeapWord* top,
    71                                FilteringClosure* cl) {
    72     HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
    73                                                        (OopClosure*)cl);
    74   }
    76   // Get the actual top of the area on which the closure will
    77   // operate, given where the top is assumed to be (the end of the
    78   // memory region passed to do_MemRegion) and where the object
    79   // at the top is assumed to start. For example, an object may
    80   // start at the top but actually extend past the assumed top,
    81   // in which case the top becomes the end of the object.
    82   HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    83     return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
    84   }
    86   // Walk the given memory region from bottom to (actual) top
    87   // looking for objects and applying the oop closure (_cl) to
    88   // them. The base implementation of this treats the area as
    89   // blocks, where a block may or may not be an object. Sub-
    90   // classes should override this to provide more accurate
    91   // or possibly more efficient walking.
    92   void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    93     Filtering_DCTOC::walk_mem_region(mr, bottom, top);
    94   }
    96 public:
    97   HeapRegionDCTOC(G1CollectedHeap* g1,
    98                   HeapRegion* hr, OopClosure* cl,
    99                   CardTableModRefBS::PrecisionStyle precision,
   100                   FilterKind fk);
   101 };
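        // A brief usage sketch for the closure above (hypothetical caller code,
        // not part of this header); it assumes the caller has already obtained
        // "g1h", "hr", "oop_cl" and a dirty MemRegion "dirty_mr":
        //
        //   HeapRegionDCTOC dcto_cl(g1h, hr, &oop_cl,
        //                           CardTableModRefBS::Precise,
        //                           HeapRegionDCTOC::OutOfRegionFilterKind);
        //   dcto_cl.do_MemRegion(dirty_mr);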
   104 // The complicating factor is that BlockOffsetTable diverged
   105 // significantly, and we need functionality that is only in the G1 version.
   106 // So I copied that code, which led to an alternate G1 version of
   107 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
   108 // be reconciled, then G1OffsetTableContigSpace could go away.
   110 // The idea behind time stamps is the following. Doing a save_marks on
   111 // all regions at every GC pause is time consuming (if I remember
   112 // well, 10ms or so). So, we would like to do that only for regions
   113 // that are GC alloc regions. To achieve this, we use time
   114 // stamps. For every evacuation pause, G1CollectedHeap generates a
   115 // unique time stamp (essentially a counter that gets
   116 // incremented). Every time we want to call save_marks on a region,
   117 // we set the saved_mark_word to top and also copy the current GC
   118 // time stamp to the time stamp field of the space. Reading the
   119 // saved_mark_word involves checking the time stamp of the
   120 // region. If it is the same as the current GC time stamp, then we
   121 // can safely read the saved_mark_word field, as it is valid. If the
   122 // time stamp of the region is not the same as the current GC time
   123 // stamp, then we instead read top, as the saved_mark_word field is
   124 // invalid. Time stamps (on the regions and also on the
   125 // G1CollectedHeap) are reset at every cleanup (we iterate over
   126 // the regions anyway) and at the end of a Full GC. The current scheme
    127 // that uses sequential unsigned ints will fail only if we have around 4 billion
   128 // evacuation pauses between two cleanups, which is _highly_ unlikely.
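        // A minimal sketch of the read path described above (illustrative only;
        // the actual implementation lives in heapRegion.cpp and carries extra
        // assertions):
        //
        //   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
        //     unsigned curr = G1CollectedHeap::heap()->get_gc_time_stamp();
        //     if (_gc_time_stamp < curr)
        //       return top();                              // saved mark is stale
        //     else
        //       return ContiguousSpace::saved_mark_word(); // saved this pause
        //   }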
   130 class G1OffsetTableContigSpace: public ContiguousSpace {
   131   friend class VMStructs;
   132  protected:
   133   G1BlockOffsetArrayContigSpace _offsets;
   134   Mutex _par_alloc_lock;
   135   volatile unsigned _gc_time_stamp;
   137  public:
   138   // Constructor.  If "is_zeroed" is true, the MemRegion "mr" may be
   139   // assumed to contain zeros.
   140   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
   141                            MemRegion mr, bool is_zeroed = false);
   143   void set_bottom(HeapWord* value);
   144   void set_end(HeapWord* value);
   146   virtual HeapWord* saved_mark_word() const;
   147   virtual void set_saved_mark();
   148   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   150   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   151   virtual void clear(bool mangle_space);
   153   HeapWord* block_start(const void* p);
   154   HeapWord* block_start_const(const void* p) const;
   156   // Add offset table update.
   157   virtual HeapWord* allocate(size_t word_size);
   158   HeapWord* par_allocate(size_t word_size);
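          // A rough sketch of what the "offset table update" means for allocate()
          // (illustrative, not necessarily the exact implementation): once the
          // ContiguousSpace allocation succeeds, the block offset table is told
          // about the new block so that block_start() continues to work.
          //
          //   HeapWord* res = ContiguousSpace::allocate(word_size);
          //   if (res != NULL) {
          //     _offsets.alloc_block(res, word_size);
          //   }
          //   return res;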
   160   // MarkSweep support phase3
   161   virtual HeapWord* initialize_threshold();
   162   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
   164   virtual void print() const;
   165 };
   167 class HeapRegion: public G1OffsetTableContigSpace {
   168   friend class VMStructs;
   169  private:
   171   enum HumongousType {
   172     NotHumongous = 0,
   173     StartsHumongous,
   174     ContinuesHumongous
   175   };
   177   // The next filter kind that should be used for a "new_dcto_cl" call with
   178   // the "traditional" signature.
   179   HeapRegionDCTOC::FilterKind _next_fk;
   181   // Requires that the region "mr" be dense with objects, and begin and end
   182   // with an object.
   183   void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
   185   // The remembered set for this region.
   186   // (Might want to make this "inline" later, to avoid some alloc failure
   187   // issues.)
   188   HeapRegionRemSet* _rem_set;
   190   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
   192  protected:
   193   // If this region is a member of a HeapRegionSeq, the index in that
   194   // sequence, otherwise -1.
   195   int  _hrs_index;
   197   HumongousType _humongous_type;
   198   // For a humongous region, region in which it starts.
   199   HeapRegion* _humongous_start_region;
    200   // For the start region of a humongous sequence, its original end().
   201   HeapWord* _orig_end;
   203   // True iff the region is in current collection_set.
   204   bool _in_collection_set;
    206   // True iff the region is on the unclean list, waiting to be zero-filled.
   207   bool _is_on_unclean_list;
   209   // True iff the region is on the free list, ready for allocation.
   210   bool _is_on_free_list;
   212   // Is this or has it been an allocation region in the current collection
   213   // pause.
   214   bool _is_gc_alloc_region;
   216   // True iff an attempt to evacuate an object in the region failed.
   217   bool _evacuation_failed;
    219   // A heap region may be a member of one of a number of special subsets, each
   220   // represented as linked lists through the field below.  Currently, these
   221   // sets include:
   222   //   The collection set.
   223   //   The set of allocation regions used in a collection pause.
   224   //   Spaces that may contain gray objects.
   225   HeapRegion* _next_in_special_set;
   227   // next region in the young "generation" region set
   228   HeapRegion* _next_young_region;
   230   // Next region whose cards need cleaning
   231   HeapRegion* _next_dirty_cards_region;
   233   // For parallel heapRegion traversal.
   234   jint _claimed;
   236   // We use concurrent marking to determine the amount of live data
   237   // in each heap region.
   238   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
   239   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
    241   // See the "sort_index" method.  -1 means the region is not in the array.
   242   int _sort_index;
   244   // <PREDICTION>
   245   double _gc_efficiency;
   246   // </PREDICTION>
   248   enum YoungType {
   249     NotYoung,                   // a region is not young
   250     Young,                      // a region is young
   251     Survivor                    // a region is young and it contains
   252                                 // survivor
   253   };
   255   volatile YoungType _young_type;
   256   int  _young_index_in_cset;
   257   SurvRateGroup* _surv_rate_group;
   258   int  _age_index;
   260   // The start of the unmarked area. The unmarked area extends from this
   261   // word until the top and/or end of the region, and is the part
   262   // of the region for which no marking was done, i.e. objects may
   263   // have been allocated in this part since the last mark phase.
   264   // "prev" is the top at the start of the last completed marking.
   265   // "next" is the top at the start of the in-progress marking (if any.)
   266   HeapWord* _prev_top_at_mark_start;
   267   HeapWord* _next_top_at_mark_start;
   268   // If a collection pause is in progress, this is the top at the start
   269   // of that pause.
   271   // We've counted the marked bytes of objects below here.
   272   HeapWord* _top_at_conc_mark_count;
   274   void init_top_at_mark_start() {
   275     assert(_prev_marked_bytes == 0 &&
   276            _next_marked_bytes == 0,
   277            "Must be called after zero_marked_bytes.");
   278     HeapWord* bot = bottom();
   279     _prev_top_at_mark_start = bot;
   280     _next_top_at_mark_start = bot;
   281     _top_at_conc_mark_count = bot;
   282   }
   284   jint _zfs;  // A member of ZeroFillState.  Protected by ZF_lock.
   285   Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
   286                         // made it so.
   288   void set_young_type(YoungType new_type) {
   289     //assert(_young_type != new_type, "setting the same type" );
   290     // TODO: add more assertions here
   291     _young_type = new_type;
   292   }
   294   // Cached attributes used in the collection set policy information
   296   // The RSet length that was added to the total value
   297   // for the collection set.
   298   size_t _recorded_rs_length;
   300   // The predicted elapsed time that was added to total value
   301   // for the collection set.
   302   double _predicted_elapsed_time_ms;
   304   // The predicted number of bytes to copy that was added to
   305   // the total value for the collection set.
   306   size_t _predicted_bytes_to_copy;
   308  public:
   309   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
   310   HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
   311              MemRegion mr, bool is_zeroed);
   313   static int LogOfHRGrainBytes;
   314   static int LogOfHRGrainWords;
   315   // The normal type of these should be size_t. However, they used to
   316   // be members of an enum before and they are assumed by the
   317   // compilers to be ints. To avoid going and fixing all their uses,
   318   // I'm declaring them as ints. I'm not anticipating heap region
   319   // sizes to reach anywhere near 2g, so using an int here is safe.
   320   static int GrainBytes;
   321   static int GrainWords;
   322   static int CardsPerRegion;
   324   // It sets up the heap region size (GrainBytes / GrainWords), as
   325   // well as other related fields that are based on the heap region
   326   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
   327   // CardsPerRegion). All those fields are considered constant
   328   // throughout the JVM's execution, therefore they should only be set
   329   // up once during initialization time.
   330   static void setup_heap_region_size(uintx min_heap_size);
   332   enum ClaimValues {
   333     InitialClaimValue     = 0,
   334     FinalCountClaimValue  = 1,
   335     NoteEndClaimValue     = 2,
   336     ScrubRemSetClaimValue = 3,
   337     ParVerifyClaimValue   = 4,
   338     RebuildRSClaimValue   = 5
   339   };
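          // Illustrative use of the claim values above (hypothetical caller code):
          // parallel workers try to claim each region with the pause-specific
          // value and process it only if the claim succeeds, so every region is
          // handled by exactly one worker.
          //
          //   if (hr->claimHeapRegion(HeapRegion::RebuildRSClaimValue)) {
          //     ... rebuild the remembered set of hr ...
          //   }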
   341   // Concurrent refinement requires contiguous heap regions (in which TLABs
   342   // might be allocated) to be zero-filled.  Each region therefore has a
   343   // zero-fill-state.
   344   enum ZeroFillState {
   345     NotZeroFilled,
   346     ZeroFilling,
   347     ZeroFilled,
   348     Allocated
   349   };
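          // A simplified sketch of how a zero-filling thread might drive the
          // states above via the accessors declared later in this class (locking
          // with ZF_mon omitted; hypothetical caller code):
          //
          //   if (hr->zero_fill_state() == HeapRegion::NotZeroFilled) {
          //     hr->set_zero_fill_in_progress(Thread::current());
          //     Copy::fill_to_words(hr->bottom(), hr->capacity() / HeapWordSize);
          //     hr->set_zero_fill_complete();
          //   }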
   351   // If this region is a member of a HeapRegionSeq, the index in that
   352   // sequence, otherwise -1.
   353   int hrs_index() const { return _hrs_index; }
   354   void set_hrs_index(int index) { _hrs_index = index; }
   356   // The number of bytes marked live in the region in the last marking phase.
   357   size_t marked_bytes()    { return _prev_marked_bytes; }
   358   // The number of bytes counted in the next marking.
   359   size_t next_marked_bytes() { return _next_marked_bytes; }
   360   // The number of bytes live wrt the next marking.
   361   size_t next_live_bytes() {
   362     return (top() - next_top_at_mark_start())
   363       * HeapWordSize
   364       + next_marked_bytes();
   365   }
    367   // A lower bound on the number of garbage bytes in the region.
   368   size_t garbage_bytes() {
   369     size_t used_at_mark_start_bytes =
   370       (prev_top_at_mark_start() - bottom()) * HeapWordSize;
   371     assert(used_at_mark_start_bytes >= marked_bytes(),
   372            "Can't mark more than we have.");
   373     return used_at_mark_start_bytes - marked_bytes();
   374   }
   376   // An upper bound on the number of live bytes in the region.
   377   size_t max_live_bytes() { return used() - garbage_bytes(); }
   379   void add_to_marked_bytes(size_t incr_bytes) {
   380     _next_marked_bytes = _next_marked_bytes + incr_bytes;
   381     guarantee( _next_marked_bytes <= used(), "invariant" );
   382   }
   384   void zero_marked_bytes()      {
   385     _prev_marked_bytes = _next_marked_bytes = 0;
   386   }
   388   bool isHumongous() const { return _humongous_type != NotHumongous; }
   389   bool startsHumongous() const { return _humongous_type == StartsHumongous; }
   390   bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
   391   // For a humongous region, region in which it starts.
   392   HeapRegion* humongous_start_region() const {
   393     return _humongous_start_region;
   394   }
    396   // Causes the current region to become the start region of a humongous
    397   // object that may span several regions.
   398   void set_startsHumongous(HeapWord* new_end);
   400   // The regions that continue a humongous sequence should be added using
   401   // this method, in increasing address order.
   402   void set_continuesHumongous(HeapRegion* start);
   404   // If the region has a remembered set, return a pointer to it.
   405   HeapRegionRemSet* rem_set() const {
   406     return _rem_set;
   407   }
   409   // True iff the region is in current collection_set.
   410   bool in_collection_set() const {
   411     return _in_collection_set;
   412   }
   413   void set_in_collection_set(bool b) {
   414     _in_collection_set = b;
   415   }
   416   HeapRegion* next_in_collection_set() {
   417     assert(in_collection_set(), "should only invoke on member of CS.");
   418     assert(_next_in_special_set == NULL ||
   419            _next_in_special_set->in_collection_set(),
   420            "Malformed CS.");
   421     return _next_in_special_set;
   422   }
   423   void set_next_in_collection_set(HeapRegion* r) {
   424     assert(in_collection_set(), "should only invoke on member of CS.");
   425     assert(r == NULL || r->in_collection_set(), "Malformed CS.");
   426     _next_in_special_set = r;
   427   }
   429   // True iff it is or has been an allocation region in the current
   430   // collection pause.
   431   bool is_gc_alloc_region() const {
   432     return _is_gc_alloc_region;
   433   }
   434   void set_is_gc_alloc_region(bool b) {
   435     _is_gc_alloc_region = b;
   436   }
   437   HeapRegion* next_gc_alloc_region() {
   438     assert(is_gc_alloc_region(), "should only invoke on member of CS.");
   439     assert(_next_in_special_set == NULL ||
   440            _next_in_special_set->is_gc_alloc_region(),
   441            "Malformed CS.");
   442     return _next_in_special_set;
   443   }
   444   void set_next_gc_alloc_region(HeapRegion* r) {
   445     assert(is_gc_alloc_region(), "should only invoke on member of CS.");
   446     assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
   447     _next_in_special_set = r;
   448   }
   450   bool is_on_free_list() {
   451     return _is_on_free_list;
   452   }
   454   void set_on_free_list(bool b) {
   455     _is_on_free_list = b;
   456   }
   458   HeapRegion* next_from_free_list() {
   459     assert(is_on_free_list(),
   460            "Should only invoke on free space.");
   461     assert(_next_in_special_set == NULL ||
   462            _next_in_special_set->is_on_free_list(),
   463            "Malformed Free List.");
   464     return _next_in_special_set;
   465   }
   467   void set_next_on_free_list(HeapRegion* r) {
   468     assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
   469     _next_in_special_set = r;
   470   }
   472   bool is_on_unclean_list() {
   473     return _is_on_unclean_list;
   474   }
   476   void set_on_unclean_list(bool b);
   478   HeapRegion* next_from_unclean_list() {
   479     assert(is_on_unclean_list(),
   480            "Should only invoke on unclean space.");
   481     assert(_next_in_special_set == NULL ||
   482            _next_in_special_set->is_on_unclean_list(),
   483            "Malformed unclean List.");
   484     return _next_in_special_set;
   485   }
   487   void set_next_on_unclean_list(HeapRegion* r);
   489   HeapRegion* get_next_young_region() { return _next_young_region; }
   490   void set_next_young_region(HeapRegion* hr) {
   491     _next_young_region = hr;
   492   }
   494   HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
   495   HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
   496   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   497   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
    499   // Allows logical separation between objects allocated before and after a call to save_marks().
   500   void save_marks();
   502   // Reset HR stuff to default values.
   503   void hr_clear(bool par, bool clear_space);
   505   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
   507   // Ensure that "this" is zero-filled.
   508   void ensure_zero_filled();
   509   // This one requires that the calling thread holds ZF_mon.
   510   void ensure_zero_filled_locked();
   512   // Get the start of the unmarked area in this region.
   513   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   514   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
   516   // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
    517   // allocated in the current region before the last call to "save_marks".
   518   void oop_before_save_marks_iterate(OopClosure* cl);
   520   // This call determines the "filter kind" argument that will be used for
   521   // the next call to "new_dcto_cl" on this region with the "traditional"
   522   // signature (i.e., the call below.)  The default, in the absence of a
   523   // preceding call to this method, is "NoFilterKind", and a call to this
   524   // method is necessary for each such call, or else it reverts to the
   525   // default.
   526   // (This is really ugly, but all other methods I could think of changed a
   527   // lot of main-line code for G1.)
   528   void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
   529     _next_fk = nfk;
   530   }
   532   DirtyCardToOopClosure*
   533   new_dcto_closure(OopClosure* cl,
   534                    CardTableModRefBS::PrecisionStyle precision,
   535                    HeapRegionDCTOC::FilterKind fk);
   537 #if WHASSUP
   538   DirtyCardToOopClosure*
   539   new_dcto_closure(OopClosure* cl,
   540                    CardTableModRefBS::PrecisionStyle precision,
   541                    HeapWord* boundary) {
   542     assert(boundary == NULL, "This arg doesn't make sense here.");
   543     DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
   544     _next_fk = HeapRegionDCTOC::NoFilterKind;
   545     return res;
   546   }
   547 #endif
   549   //
   550   // Note the start or end of marking. This tells the heap region
   551   // that the collector is about to start or has finished (concurrently)
   552   // marking the heap.
   553   //
   555   // Note the start of a marking phase. Record the
   556   // start of the unmarked area of the region here.
   557   void note_start_of_marking(bool during_initial_mark) {
   558     init_top_at_conc_mark_count();
   559     _next_marked_bytes = 0;
   560     if (during_initial_mark && is_young() && !is_survivor())
   561       _next_top_at_mark_start = bottom();
   562     else
   563       _next_top_at_mark_start = top();
   564   }
   566   // Note the end of a marking phase. Install the start of
   567   // the unmarked area that was captured at start of marking.
   568   void note_end_of_marking() {
   569     _prev_top_at_mark_start = _next_top_at_mark_start;
   570     _prev_marked_bytes = _next_marked_bytes;
   571     _next_marked_bytes = 0;
   573     guarantee(_prev_marked_bytes <=
   574               (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
   575               "invariant");
   576   }
   578   // After an evacuation, we need to update _next_top_at_mark_start
   579   // to be the current top.  Note this is only valid if we have only
   580   // ever evacuated into this region.  If we evacuate, allocate, and
   581   // then evacuate we are in deep doodoo.
   582   void note_end_of_copying() {
   583     assert(top() >= _next_top_at_mark_start, "Increase only");
   584     _next_top_at_mark_start = top();
   585   }
   587   // Returns "false" iff no object in the region was allocated when the
   588   // last mark phase ended.
   589   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
   591   // If "is_marked()" is true, then this is the index of the region in
   592   // an array constructed at the end of marking of the regions in a
   593   // "desirability" order.
   594   int sort_index() {
   595     return _sort_index;
   596   }
   597   void set_sort_index(int i) {
   598     _sort_index = i;
   599   }
   601   void init_top_at_conc_mark_count() {
   602     _top_at_conc_mark_count = bottom();
   603   }
   605   void set_top_at_conc_mark_count(HeapWord *cur) {
   606     assert(bottom() <= cur && cur <= end(), "Sanity.");
   607     _top_at_conc_mark_count = cur;
   608   }
   610   HeapWord* top_at_conc_mark_count() {
   611     return _top_at_conc_mark_count;
   612   }
   614   void reset_during_compaction() {
   615     guarantee( isHumongous() && startsHumongous(),
   616                "should only be called for humongous regions");
   618     zero_marked_bytes();
   619     init_top_at_mark_start();
   620   }
   622   // <PREDICTION>
   623   void calc_gc_efficiency(void);
   624   double gc_efficiency() { return _gc_efficiency;}
   625   // </PREDICTION>
   627   bool is_young() const     { return _young_type != NotYoung; }
   628   bool is_survivor() const  { return _young_type == Survivor; }
   630   int  young_index_in_cset() const { return _young_index_in_cset; }
   631   void set_young_index_in_cset(int index) {
   632     assert( (index == -1) || is_young(), "pre-condition" );
   633     _young_index_in_cset = index;
   634   }
   636   int age_in_surv_rate_group() {
   637     assert( _surv_rate_group != NULL, "pre-condition" );
   638     assert( _age_index > -1, "pre-condition" );
   639     return _surv_rate_group->age_in_group(_age_index);
   640   }
   642   void record_surv_words_in_group(size_t words_survived) {
   643     assert( _surv_rate_group != NULL, "pre-condition" );
   644     assert( _age_index > -1, "pre-condition" );
   645     int age_in_group = age_in_surv_rate_group();
   646     _surv_rate_group->record_surviving_words(age_in_group, words_survived);
   647   }
   649   int age_in_surv_rate_group_cond() {
   650     if (_surv_rate_group != NULL)
   651       return age_in_surv_rate_group();
   652     else
   653       return -1;
   654   }
   656   SurvRateGroup* surv_rate_group() {
   657     return _surv_rate_group;
   658   }
   660   void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
   661     assert( surv_rate_group != NULL, "pre-condition" );
   662     assert( _surv_rate_group == NULL, "pre-condition" );
   663     assert( is_young(), "pre-condition" );
   665     _surv_rate_group = surv_rate_group;
   666     _age_index = surv_rate_group->next_age_index();
   667   }
   669   void uninstall_surv_rate_group() {
   670     if (_surv_rate_group != NULL) {
   671       assert( _age_index > -1, "pre-condition" );
   672       assert( is_young(), "pre-condition" );
   674       _surv_rate_group = NULL;
   675       _age_index = -1;
   676     } else {
   677       assert( _age_index == -1, "pre-condition" );
   678     }
   679   }
   681   void set_young() { set_young_type(Young); }
   683   void set_survivor() { set_young_type(Survivor); }
   685   void set_not_young() { set_young_type(NotYoung); }
   687   // Determine if an object has been allocated since the last
   688   // mark performed by the collector. This returns true iff the object
   689   // is within the unmarked area of the region.
   690   bool obj_allocated_since_prev_marking(oop obj) const {
   691     return (HeapWord *) obj >= prev_top_at_mark_start();
   692   }
   693   bool obj_allocated_since_next_marking(oop obj) const {
   694     return (HeapWord *) obj >= next_top_at_mark_start();
   695   }
   697   // For parallel heapRegion traversal.
   698   bool claimHeapRegion(int claimValue);
   699   jint claim_value() { return _claimed; }
   700   // Use this carefully: only when you're sure no one is claiming...
   701   void set_claim_value(int claimValue) { _claimed = claimValue; }
   703   // Returns the "evacuation_failed" property of the region.
   704   bool evacuation_failed() { return _evacuation_failed; }
   706   // Sets the "evacuation_failed" property of the region.
   707   void set_evacuation_failed(bool b) {
   708     _evacuation_failed = b;
   710     if (b) {
   711       init_top_at_conc_mark_count();
   712       _next_marked_bytes = 0;
   713     }
   714   }
   716   // Requires that "mr" be entirely within the region.
   717   // Apply "cl->do_object" to all objects that intersect with "mr".
   718   // If the iteration encounters an unparseable portion of the region,
   719   // or if "cl->abort()" is true after a closure application,
   720   // terminate the iteration and return the address of the start of the
   721   // subregion that isn't done.  (The two can be distinguished by querying
   722   // "cl->abort()".)  Return of "NULL" indicates that the iteration
   723   // completed.
   724   HeapWord*
   725   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
   727   // In this version - if filter_young is true and the region
   728   // is a young region then we skip the iteration.
   729   HeapWord*
   730   oops_on_card_seq_iterate_careful(MemRegion mr,
   731                                    FilterOutOfRegionClosure* cl,
   732                                    bool filter_young);
   734   // A version of block start that is guaranteed to find *some* block
    735   // boundary at or before "p", but does not do object iteration, and may
   736   // therefore be used safely when the heap is unparseable.
   737   HeapWord* block_start_careful(const void* p) const {
   738     return _offsets.block_start_careful(p);
   739   }
   741   // Requires that "addr" is within the region.  Returns the start of the
   742   // first ("careful") block that starts at or after "addr", or else the
   743   // "end" of the region if there is no such block.
   744   HeapWord* next_block_start_careful(HeapWord* addr);
   746   // Returns the zero-fill-state of the current region.
   747   ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
   748   bool zero_fill_is_allocated() { return _zfs == Allocated; }
   749   Thread* zero_filler() { return _zero_filler; }
   751   // Indicate that the contents of the region are unknown, and therefore
   752   // might require zero-filling.
   753   void set_zero_fill_needed() {
   754     set_zero_fill_state_work(NotZeroFilled);
   755   }
   756   void set_zero_fill_in_progress(Thread* t) {
   757     set_zero_fill_state_work(ZeroFilling);
   758     _zero_filler = t;
   759   }
   760   void set_zero_fill_complete();
   761   void set_zero_fill_allocated() {
   762     set_zero_fill_state_work(Allocated);
   763   }
   765   void set_zero_fill_state_work(ZeroFillState zfs);
   767   // This is called when a full collection shrinks the heap.
   768   // We want to set the heap region to a value which says
    769   // it is no longer part of the heap.  For now, we'll let "NotZeroFilled" fill
   770   // that role.
   771   void reset_zero_fill() {
   772     set_zero_fill_state_work(NotZeroFilled);
   773     _zero_filler = NULL;
   774   }
   776   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   777   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
   778   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
   780   void set_recorded_rs_length(size_t rs_length) {
   781     _recorded_rs_length = rs_length;
   782   }
   784   void set_predicted_elapsed_time_ms(double ms) {
   785     _predicted_elapsed_time_ms = ms;
   786   }
   788   void set_predicted_bytes_to_copy(size_t bytes) {
   789     _predicted_bytes_to_copy = bytes;
   790   }
   792 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
   793   virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   794   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
   796   CompactibleSpace* next_compaction_space() const;
   798   virtual void reset_after_compaction();
   800   void print() const;
   801   void print_on(outputStream* st) const;
   803   // use_prev_marking == true  -> use "prev" marking information,
   804   // use_prev_marking == false -> use "next" marking information
   805   // NOTE: Only the "prev" marking information is guaranteed to be
   806   // consistent most of the time, so most calls to this should use
   807   // use_prev_marking == true. Currently, there is only one case where
   808   // this is called with use_prev_marking == false, which is to verify
   809   // the "next" marking information at the end of remark.
   810   void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
   812   // Override; it uses the "prev" marking information
   813   virtual void verify(bool allow_dirty) const;
   815 #ifdef DEBUG
   816   HeapWord* allocate(size_t size);
   817 #endif
   818 };
   820 // HeapRegionClosure is used for iterating over regions.
   821 // Terminates the iteration when the "doHeapRegion" method returns "true".
   822 class HeapRegionClosure : public StackObj {
   823   friend class HeapRegionSeq;
   824   friend class G1CollectedHeap;
   826   bool _complete;
   827   void incomplete() { _complete = false; }
   829  public:
   830   HeapRegionClosure(): _complete(true) {}
   832   // Typically called on each region until it returns true.
   833   virtual bool doHeapRegion(HeapRegion* r) = 0;
   835   // True after iteration if the closure was applied to all heap regions
   836   // and returned "false" in all cases.
   837   bool complete() { return _complete; }
   838 };
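        // A short, hypothetical example of a concrete closure that counts the
        // humongous regions in the heap; it would typically be applied to every
        // region via G1CollectedHeap::heap_region_iterate():
        //
        //   class CountHumongousClosure : public HeapRegionClosure {
        //     size_t _count;
        //   public:
        //     CountHumongousClosure() : _count(0) {}
        //     bool doHeapRegion(HeapRegion* r) {
        //       if (r->isHumongous()) _count++;
        //       return false;  // "false" means: keep iterating
        //     }
        //     size_t count() const { return _count; }
        //   };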
    840 // A linked list of heap regions.  It leaves the "next" field
   841 // unspecified; that's up to subtypes.
   842 class RegionList VALUE_OBJ_CLASS_SPEC {
   843 protected:
   844   virtual HeapRegion* get_next(HeapRegion* chr) = 0;
   845   virtual void set_next(HeapRegion* chr,
   846                         HeapRegion* new_next) = 0;
   848   HeapRegion* _hd;
   849   HeapRegion* _tl;
   850   size_t _sz;
   852   // Protected constructor because this type is only meaningful
   853   // when the _get/_set next functions are defined.
   854   RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
   855 public:
   856   void reset() {
   857     _hd = NULL;
   858     _tl = NULL;
   859     _sz = 0;
   860   }
   861   HeapRegion* hd() { return _hd; }
   862   HeapRegion* tl() { return _tl; }
   863   size_t sz() { return _sz; }
   864   size_t length();
   866   bool well_formed() {
   867     return
   868       ((hd() == NULL && tl() == NULL && sz() == 0)
   869        || (hd() != NULL && tl() != NULL && sz() > 0))
   870       && (sz() == length());
   871   }
   872   virtual void insert_before_head(HeapRegion* r);
   873   void prepend_list(RegionList* new_list);
   874   virtual HeapRegion* pop();
   875   void dec_sz() { _sz--; }
   876   // Requires that "r" is an element of the list, and is not the tail.
   877   void delete_after(HeapRegion* r);
   878 };
   880 class EmptyNonHRegionList: public RegionList {
   881 protected:
   882   // Protected constructor because this type is only meaningful
   883   // when the _get/_set next functions are defined.
   884   EmptyNonHRegionList() : RegionList() {}
   886 public:
   887   void insert_before_head(HeapRegion* r) {
   888     //    assert(r->is_empty(), "Better be empty");
   889     assert(!r->isHumongous(), "Better not be humongous.");
   890     RegionList::insert_before_head(r);
   891   }
   892   void prepend_list(EmptyNonHRegionList* new_list) {
   893     //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
   894     //     "Better be empty");
   895     assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
   896            "Better not be humongous.");
   897     //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
   898     //     "Better be empty");
   899     assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
   900            "Better not be humongous.");
   901     RegionList::prepend_list(new_list);
   902   }
   903 };
   905 class UncleanRegionList: public EmptyNonHRegionList {
   906 public:
   907   HeapRegion* get_next(HeapRegion* hr) {
   908     return hr->next_from_unclean_list();
   909   }
   910   void set_next(HeapRegion* hr, HeapRegion* new_next) {
   911     hr->set_next_on_unclean_list(new_next);
   912   }
   914   UncleanRegionList() : EmptyNonHRegionList() {}
   916   void insert_before_head(HeapRegion* r) {
   917     assert(!r->is_on_free_list(),
   918            "Better not already be on free list");
   919     assert(!r->is_on_unclean_list(),
   920            "Better not already be on unclean list");
   921     r->set_zero_fill_needed();
   922     r->set_on_unclean_list(true);
   923     EmptyNonHRegionList::insert_before_head(r);
   924   }
   925   void prepend_list(UncleanRegionList* new_list) {
   926     assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
   927            "Better not already be on free list");
   928     assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
   929            "Better already be marked as on unclean list");
   930     assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
   931            "Better not already be on free list");
   932     assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
   933            "Better already be marked as on unclean list");
   934     EmptyNonHRegionList::prepend_list(new_list);
   935   }
   936   HeapRegion* pop() {
   937     HeapRegion* res = RegionList::pop();
   938     if (res != NULL) res->set_on_unclean_list(false);
   939     return res;
   940   }
   941 };
   943 // Local Variables: ***
   944 // c-indentation-style: gnu ***
   945 // End: ***
   947 #endif // SERIALGC
