src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      trims
date:        Thu, 27 May 2010 19:08:38 -0700
changeset:   1907:c18cbe5936b8
parent:      1829:1316cec51b4d
child:       2021:5cbac8938c4c
permissions: -rw-r--r--

6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism.  But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};

// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// well, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
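//
// A minimal sketch of the read side, assuming an accessor named
// get_gc_time_stamp() on G1CollectedHeap (the names here are
// illustrative, not necessarily the ones used in the .cpp file):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp())
//       return top();                              // stamp is stale: field invalid
//     else
//       return ContiguousSpace::saved_mark_word(); // stamp matches: field valid
//   }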

class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;

 public:
  // Constructor.  If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;
};

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };
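
  // A humongous object occupies one StartsHumongous region followed by
  // zero or more ContinuesHumongous regions, contiguous and in increasing
  // address order (a sketch, not to scale):
  //
  //   [SH: object start ...][CH: ..............][CH: ... object end    ]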

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int  _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff the region is on the unclean list, waiting to be zero filled.
  bool _is_on_unclean_list;

  // True iff the region is on the free list, ready for allocation.
  bool _is_on_free_list;

  // Is this or has it been an allocation region in the current collection
  // pause.
  bool _is_gc_alloc_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets, each
  // represented as linked lists through the field below.  Currently, these
  // sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See "sort_index" method.  -1 means it is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains
                                // survivor
  };

  YoungType _young_type;
  int  _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int  _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  jint _zfs;  // A member of ZeroFillState.  Protected by ZF_lock.
  Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
                        // made it so.

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;
  // The normal type of these should be size_t. However, they used to
  // be members of an enum before and they are assumed by the
  // compilers to be ints. To avoid going and fixing all their uses,
  // I'm declaring them as ints. I'm not anticipating heap region
  // sizes to reach anywhere near 2g, so using an int here is safe.
  static int GrainBytes;
  static int GrainWords;
  static int CardsPerRegion;

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);
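
  // The derived fields relate to GrainBytes as sketched below. These are
  // only the invariants; the actual computation lives in heapRegion.cpp,
  // which also chooses GrainBytes itself from the heap size and flags:
  //
  //   GrainBytes     == 1 << LogOfHRGrainBytes;              // a power of 2
  //   GrainWords     == GrainBytes >> LogHeapWordSize;
  //   CardsPerRegion == GrainBytes >> CardTableModRefBS::card_shift;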

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4,
    RebuildRSClaimValue   = 5
  };

  // Concurrent refinement requires contiguous heap regions (in which TLABs
  // might be allocated) to be zero-filled.  Each region therefore has a
  // zero-fill-state.
  enum ZeroFillState {
    NotZeroFilled,
    ZeroFilling,
    ZeroFilled,
    Allocated
  };
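
  // The states are expected to advance as sketched below; the transitions
  // are driven by the set_zero_fill_* methods declared further down:
  //
  //   NotZeroFilled --set_zero_fill_in_progress(t)--> ZeroFilling
  //   ZeroFilling   --set_zero_fill_complete()------> ZeroFilled
  //   ZeroFilled    --set_zero_fill_allocated()-----> Allocated
  //   (any state)   --set_zero_fill_needed()--------> NotZeroFilled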

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int hrs_index() const { return _hrs_index; }
  void set_hrs_index(int index) { _hrs_index = index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start())
      * HeapWordSize
      + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }
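
  // Worked example (assumed numbers, 8-byte HeapWords): if the last marking
  // started with prev_top_at_mark_start() 1000 words above bottom() and
  // found 600 words (4800 bytes) live, garbage_bytes() is
  // 1000*8 - 4800 = 3200.  It is only a lower bound because anything
  // allocated above prev_top_at_mark_start() is not counted as garbage.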

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Causes the current region to represent a humongous object spanning "n"
  // regions.
  virtual void set_startsHumongous();

  // The regions that continue a humongous sequence should be added using
  // this method, in increasing address order.
  void set_continuesHumongous(HeapRegion* start);

  void add_continuingHumongousRegion(HeapRegion* cont);

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // True iff it is or has been an allocation region in the current
  // collection pause.
  bool is_gc_alloc_region() const {
    return _is_gc_alloc_region;
  }
  void set_is_gc_alloc_region(bool b) {
    _is_gc_alloc_region = b;
  }
  HeapRegion* next_gc_alloc_region() {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_gc_alloc_region(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_gc_alloc_region(HeapRegion* r) {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
    _next_in_special_set = r;
  }

  bool is_on_free_list() {
    return _is_on_free_list;
  }

  void set_on_free_list(bool b) {
    _is_on_free_list = b;
  }

  HeapRegion* next_from_free_list() {
    assert(is_on_free_list(),
           "Should only invoke on free space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_free_list(),
           "Malformed Free List.");
    return _next_in_special_set;
  }

  void set_next_on_free_list(HeapRegion* r) {
    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
    _next_in_special_set = r;
  }

  bool is_on_unclean_list() {
    return _is_on_unclean_list;
  }

  void set_on_unclean_list(bool b);

  HeapRegion* next_from_unclean_list() {
    assert(is_on_unclean_list(),
           "Should only invoke on unclean space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_unclean_list(),
           "Malformed unclean List.");
    return _next_in_special_set;
  }

  void set_next_on_unclean_list(HeapRegion* r);

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Ensure that "this" is zero-filled.
  void ensure_zero_filled();
  // This one requires that the calling thread holds ZF_mon.
  void ensure_zero_filled_locked();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_marks".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.)  The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

#if WHASSUP
  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapWord* boundary) {
    assert(boundary == NULL, "This arg doesn't make sense here.");
    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
    _next_fk = HeapRegionDCTOC::NoFilterKind;
    return res;
  }
#endif
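
  // The intended (hypothetical) call pattern from main-line code: pick the
  // filter, then create the closure via the traditional signature; the
  // filter kind is consumed and reverts to the default afterwards.
  //
  //   hr->set_next_filter_kind(HeapRegionDCTOC::OutOfRegionFilterKind);
  //   DirtyCardToOopClosure* dcto =
  //     hr->new_dcto_closure(cl, precision, NULL /* boundary */);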

  //
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.
  //

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }
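
  // Timeline sketch of the two fields across one marking cycle:
  //
  //   note_start_of_marking(): _next_top_at_mark_start = top();
  //     objects allocated above it during marking are treated as live.
  //   note_end_of_marking():   _prev_top_at_mark_start = _next_top_at_mark_start;
  //     the completed marking's snapshot now backs marked_bytes() etc.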

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top.  Note this is only valid if we have only
  // ever evacuated into this region.  If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start, "Increase only");
    _next_top_at_mark_start = top();
  }

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }
  // </PREDICTION>

  bool is_young() const     { return _young_type != NotYoung; }
  bool is_survivor() const  { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }
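
  // Lifecycle sketch (the call sites are elsewhere in G1; shown here only
  // to illustrate the intended ordering):
  //
  //   hr->install_surv_rate_group(group);  // region becomes young
  //   hr->record_surv_words_in_group(n);   // after its objects are evacuated
  //   hr->uninstall_surv_rate_group();     // region leaves the young set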

  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
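
  // A plausible implementation shape for claimHeapRegion() (the real one
  // lives in heapRegion.cpp; this sketch assumes HotSpot's Atomic::cmpxchg):
  //
  //   bool HeapRegion::claimHeapRegion(jint claimValue) {
  //     jint current = _claimed;
  //     if (current != claimValue) {
  //       jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
  //       if (res == current) return true;  // we installed the claim
  //     }
  //     return false;                       // already claimed, or we raced
  //   }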

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  (The two can be distinguished by querying
  // "cl->abort()".)  Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl);

  // The region "mr" is entirely in "this", and starts and ends at block
  // boundaries. The caller declares that all the contained blocks are
  // coalesced into one.
  void declare_filled_region_to_BOT(MemRegion mr) {
    _offsets.single_block(mr.start(), mr.end());
  }

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does no object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }

  // Requires that "addr" is within the region.  Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  // Returns the zero-fill-state of the current region.
  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
  bool zero_fill_is_allocated() { return _zfs == Allocated; }
  Thread* zero_filler() { return _zero_filler; }

  // Indicate that the contents of the region are unknown, and therefore
  // might require zero-filling.
  void set_zero_fill_needed() {
    set_zero_fill_state_work(NotZeroFilled);
  }
  void set_zero_fill_in_progress(Thread* t) {
    set_zero_fill_state_work(ZeroFilling);
    _zero_filler = t;
  }
  void set_zero_fill_complete();
  void set_zero_fill_allocated() {
    set_zero_fill_state_work(Allocated);
  }

  void set_zero_fill_state_work(ZeroFillState zfs);

  // This is called when a full collection shrinks the heap.
  // We want to set the heap region to a value which says
  // it is no longer part of the heap.  For now, we'll let "NotZeroFilled" fill
  // that role.
  void reset_zero_fill() {
    set_zero_fill_state_work(NotZeroFilled);
    _zero_filler = NULL;
  }

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // use_prev_marking == true. Currently, there is only one case where
  // this is called with use_prev_marking == false, which is to verify
  // the "next" marking information at the end of remark.
  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;

#ifdef DEBUG
  HeapWord* allocate(size_t size);
#endif
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
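
// A minimal sketch of a concrete closure (the iteration entry points live
// on G1CollectedHeap/HeapRegionSeq, not shown here; Space::free() is
// inherited by HeapRegion):
//
//   class CountFreeClosure : public HeapRegionClosure {
//     size_t _free;
//   public:
//     CountFreeClosure() : _free(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       _free += r->free();      // accumulate free bytes per region
//       return false;            // keep iterating over all regions
//     }
//     size_t free_bytes() { return _free; }
//   };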

// A linked list of heap regions.  It leaves the "next" field
// unspecified; that's up to subtypes.
class RegionList VALUE_OBJ_CLASS_SPEC {
protected:
  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
  virtual void set_next(HeapRegion* chr,
                        HeapRegion* new_next) = 0;

  HeapRegion* _hd;
  HeapRegion* _tl;
  size_t _sz;

  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
public:
  void reset() {
    _hd = NULL;
    _tl = NULL;
    _sz = 0;
  }
  HeapRegion* hd() { return _hd; }
  HeapRegion* tl() { return _tl; }
  size_t sz() { return _sz; }
  size_t length();

  bool well_formed() {
    return
      ((hd() == NULL && tl() == NULL && sz() == 0)
       || (hd() != NULL && tl() != NULL && sz() > 0))
      && (sz() == length());
  }
  virtual void insert_before_head(HeapRegion* r);
  void prepend_list(RegionList* new_list);
  virtual HeapRegion* pop();
  void dec_sz() { _sz--; }
  // Requires that "r" is an element of the list, and is not the tail.
  void delete_after(HeapRegion* r);
};

class EmptyNonHRegionList: public RegionList {
protected:
  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  EmptyNonHRegionList() : RegionList() {}

public:
  void insert_before_head(HeapRegion* r) {
    //    assert(r->is_empty(), "Better be empty");
    assert(!r->isHumongous(), "Better not be humongous.");
    RegionList::insert_before_head(r);
  }
  void prepend_list(EmptyNonHRegionList* new_list) {
    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
    //     "Better be empty");
    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
           "Better not be humongous.");
    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
    //     "Better be empty");
    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
           "Better not be humongous.");
    RegionList::prepend_list(new_list);
  }
};

class UncleanRegionList: public EmptyNonHRegionList {
public:
  HeapRegion* get_next(HeapRegion* hr) {
    return hr->next_from_unclean_list();
  }
  void set_next(HeapRegion* hr, HeapRegion* new_next) {
    hr->set_next_on_unclean_list(new_next);
  }

  UncleanRegionList() : EmptyNonHRegionList() {}

  void insert_before_head(HeapRegion* r) {
    assert(!r->is_on_free_list(),
           "Better not already be on free list");
    assert(!r->is_on_unclean_list(),
           "Better not already be on unclean list");
    r->set_zero_fill_needed();
    r->set_on_unclean_list(true);
    EmptyNonHRegionList::insert_before_head(r);
  }
  void prepend_list(UncleanRegionList* new_list) {
    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    EmptyNonHRegionList::prepend_list(new_list);
  }
  HeapRegion* pop() {
    HeapRegion* res = RegionList::pop();
    if (res != NULL) res->set_on_unclean_list(false);
    return res;
  }
};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SERIALGC
