src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

author:      johnc
date:        Tue, 28 Aug 2012 15:20:08 -0700
changeset:   4016:c9814fadeb38
parent:      4015:bb3f6194fedb
child:       4037:da91efe96a93
permissions: -rw-r--r--

7041879: G1: introduce stress testing parameter to cause frequent evacuation failures
Summary: Add the G1EvacuationFailureALot flag (and supporting flags) to force-trigger evacuation failures. The supporting flags control how often to trigger an evacuation failure and during which types of evacuation pause. This functionality is analogous to that of PromotionFailureALot for the other collectors.
Reviewed-by: brutisso
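
Usage sketch (editor's addition, not part of the changeset text): since the supporting code is compiled out of product builds (see the #ifndef PRODUCT block in the header below), the flag is only available in debug builds, e.g.

    java -XX:+UseG1GC -XX:+G1EvacuationFailureALot ...

with the supporting flags (whose exact names the summary above does not list) selecting how many evacuations to allow between induced failures and which pause types (young, mixed, during-marking, initial-mark) are eligible.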

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
    28 #include "gc_implementation/g1/concurrentMark.hpp"
    29 #include "gc_implementation/g1/g1AllocRegion.hpp"
    30 #include "gc_implementation/g1/g1HRPrinter.hpp"
    31 #include "gc_implementation/g1/g1RemSet.hpp"
    32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
    33 #include "gc_implementation/g1/heapRegionSeq.hpp"
    34 #include "gc_implementation/g1/heapRegionSets.hpp"
    35 #include "gc_implementation/shared/hSpaceCounters.hpp"
    36 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
    37 #include "memory/barrierSet.hpp"
    38 #include "memory/memRegion.hpp"
    39 #include "memory/sharedHeap.hpp"
    41 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
    42 // It uses the "Garbage First" heap organization and algorithm, which
    43 // may combine concurrent marking with parallel, incremental compaction of
    44 // heap subsets that will yield large amounts of garbage.
    46 class HeapRegion;
    47 class HRRSCleanupTask;
    48 class PermanentGenerationSpec;
    49 class GenerationSpec;
    50 class OopsInHeapRegionClosure;
    51 class G1ScanHeapEvacClosure;
    52 class ObjectClosure;
    53 class SpaceClosure;
    54 class CompactibleSpaceClosure;
    55 class Space;
    56 class G1CollectorPolicy;
    57 class GenRemSet;
    58 class G1RemSet;
    59 class HeapRegionRemSetIterator;
    60 class ConcurrentMark;
    61 class ConcurrentMarkThread;
    62 class ConcurrentG1Refine;
    63 class GenerationCounters;
    65 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
    66 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
    68 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
    69 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
    71 enum GCAllocPurpose {
    72   GCAllocForTenured,
    73   GCAllocForSurvived,
    74   GCAllocPurposeCount
    75 };
    77 class YoungList : public CHeapObj<mtGC> {
    78 private:
    79   G1CollectedHeap* _g1h;
    81   HeapRegion* _head;
    83   HeapRegion* _survivor_head;
    84   HeapRegion* _survivor_tail;
    86   HeapRegion* _curr;
    88   uint        _length;
    89   uint        _survivor_length;
    91   size_t      _last_sampled_rs_lengths;
    92   size_t      _sampled_rs_lengths;
    94   void         empty_list(HeapRegion* list);
    96 public:
    97   YoungList(G1CollectedHeap* g1h);
    99   void         push_region(HeapRegion* hr);
   100   void         add_survivor_region(HeapRegion* hr);
   102   void         empty_list();
   103   bool         is_empty() { return _length == 0; }
   104   uint         length() { return _length; }
   105   uint         survivor_length() { return _survivor_length; }
   107   // Currently we do not keep track of the used byte sum for the
   108   // young list and the survivors and it'd be quite a lot of work to
    109   // do so. When we eventually replace the young list with
   110   // instances of HeapRegionLinkedList we'll get that for free. So,
   111   // we'll report the more accurate information then.
   112   size_t       eden_used_bytes() {
   113     assert(length() >= survivor_length(), "invariant");
   114     return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
   115   }
   116   size_t       survivor_used_bytes() {
   117     return (size_t) survivor_length() * HeapRegion::GrainBytes;
   118   }
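          // [Editor's note -- worked example, not in the original source:
          //  assuming 1 MB regions (HeapRegion::GrainBytes == 1*M), a young
          //  list with length() == 20 and survivor_length() == 5 reports
          //  eden_used_bytes() == 15 * 1M and survivor_used_bytes() == 5 * 1M.]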
   120   void rs_length_sampling_init();
   121   bool rs_length_sampling_more();
   122   void rs_length_sampling_next();
   124   void reset_sampled_info() {
   125     _last_sampled_rs_lengths =   0;
   126   }
   127   size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
   129   // for development purposes
   130   void reset_auxilary_lists();
   131   void clear() { _head = NULL; _length = 0; }
   133   void clear_survivors() {
   134     _survivor_head    = NULL;
   135     _survivor_tail    = NULL;
   136     _survivor_length  = 0;
   137   }
   139   HeapRegion* first_region() { return _head; }
   140   HeapRegion* first_survivor_region() { return _survivor_head; }
   141   HeapRegion* last_survivor_region() { return _survivor_tail; }
   143   // debugging
   144   bool          check_list_well_formed();
   145   bool          check_list_empty(bool check_sample = true);
   146   void          print();
   147 };
   149 class MutatorAllocRegion : public G1AllocRegion {
   150 protected:
   151   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   152   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
   153 public:
   154   MutatorAllocRegion()
   155     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
   156 };
   158 // The G1 STW is alive closure.
   159 // An instance is embedded into the G1CH and used as the
   160 // (optional) _is_alive_non_header closure in the STW
   161 // reference processor. It is also extensively used during
    162   // reference processing during STW evacuation pauses.
   163 class G1STWIsAliveClosure: public BoolObjectClosure {
   164   G1CollectedHeap* _g1;
   165 public:
   166   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   167   void do_object(oop p) { assert(false, "Do not call."); }
   168   bool do_object_b(oop p);
   169 };
   171 class SurvivorGCAllocRegion : public G1AllocRegion {
   172 protected:
   173   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   174   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
   175 public:
   176   SurvivorGCAllocRegion()
   177   : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
   178 };
   180 class OldGCAllocRegion : public G1AllocRegion {
   181 protected:
   182   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   183   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
   184 public:
   185   OldGCAllocRegion()
   186   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
   187 };
   189 class RefineCardTableEntryClosure;
   191 class G1CollectedHeap : public SharedHeap {
   192   friend class VM_G1CollectForAllocation;
   193   friend class VM_GenCollectForPermanentAllocation;
   194   friend class VM_G1CollectFull;
   195   friend class VM_G1IncCollectionPause;
   196   friend class VMStructs;
   197   friend class MutatorAllocRegion;
   198   friend class SurvivorGCAllocRegion;
   199   friend class OldGCAllocRegion;
   201   // Closures used in implementation.
   202   template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
   203   friend class G1ParCopyClosure;
   204   friend class G1IsAliveClosure;
   205   friend class G1EvacuateFollowersClosure;
   206   friend class G1ParScanThreadState;
   207   friend class G1ParScanClosureSuper;
   208   friend class G1ParEvacuateFollowersClosure;
   209   friend class G1ParTask;
   210   friend class G1FreeGarbageRegionClosure;
   211   friend class RefineCardTableEntryClosure;
   212   friend class G1PrepareCompactClosure;
   213   friend class RegionSorter;
   214   friend class RegionResetter;
   215   friend class CountRCClosure;
   216   friend class EvacPopObjClosure;
   217   friend class G1ParCleanupCTTask;
   219   // Other related classes.
   220   friend class G1MarkSweep;
   222 private:
   223   // The one and only G1CollectedHeap, so static functions can find it.
   224   static G1CollectedHeap* _g1h;
   226   static size_t _humongous_object_threshold_in_words;
   228   // Storage for the G1 heap (excludes the permanent generation).
   229   VirtualSpace _g1_storage;
   230   MemRegion    _g1_reserved;
   232   // The part of _g1_storage that is currently committed.
   233   MemRegion _g1_committed;
   235   // The master free list. It will satisfy all new region allocations.
   236   MasterFreeRegionList      _free_list;
   238   // The secondary free list which contains regions that have been
   239   // freed up during the cleanup process. This will be appended to the
   240   // master free list when appropriate.
   241   SecondaryFreeRegionList   _secondary_free_list;
   243   // It keeps track of the old regions.
   244   MasterOldRegionSet        _old_set;
   246   // It keeps track of the humongous regions.
   247   MasterHumongousRegionSet  _humongous_set;
   249   // The number of regions we could create by expansion.
   250   uint _expansion_regions;
   252   // The block offset table for the G1 heap.
   253   G1BlockOffsetSharedArray* _bot_shared;
   255   // Tears down the region sets / lists so that they are empty and the
   256   // regions on the heap do not belong to a region set / list. The
   257   // only exception is the humongous set which we leave unaltered. If
   258   // free_list_only is true, it will only tear down the master free
   259   // list. It is called before a Full GC (free_list_only == false) or
   260   // before heap shrinking (free_list_only == true).
   261   void tear_down_region_sets(bool free_list_only);
   263   // Rebuilds the region sets / lists so that they are repopulated to
   264   // reflect the contents of the heap. The only exception is the
   265   // humongous set which was not torn down in the first place. If
   266   // free_list_only is true, it will only rebuild the master free
   267   // list. It is called after a Full GC (free_list_only == false) or
   268   // after heap shrinking (free_list_only == true).
   269   void rebuild_region_sets(bool free_list_only);
   271   // The sequence of all heap regions in the heap.
   272   HeapRegionSeq _hrs;
   274   // Alloc region used to satisfy mutator allocation requests.
   275   MutatorAllocRegion _mutator_alloc_region;
   277   // Alloc region used to satisfy allocation requests by the GC for
   278   // survivor objects.
   279   SurvivorGCAllocRegion _survivor_gc_alloc_region;
   281   // PLAB sizing policy for survivors.
   282   PLABStats _survivor_plab_stats;
   284   // Alloc region used to satisfy allocation requests by the GC for
   285   // old objects.
   286   OldGCAllocRegion _old_gc_alloc_region;
   288   // PLAB sizing policy for tenured objects.
   289   PLABStats _old_plab_stats;
   291   PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
   292     PLABStats* stats = NULL;
   294     switch (purpose) {
   295     case GCAllocForSurvived:
   296       stats = &_survivor_plab_stats;
   297       break;
   298     case GCAllocForTenured:
   299       stats = &_old_plab_stats;
   300       break;
   301     default:
   302       assert(false, "unrecognized GCAllocPurpose");
   303     }
   305     return stats;
   306   }
   308   // The last old region we allocated to during the last GC.
   309   // Typically, it is not full so we should re-use it during the next GC.
   310   HeapRegion* _retained_old_gc_alloc_region;
   312   // It specifies whether we should attempt to expand the heap after a
   313   // region allocation failure. If heap expansion fails we set this to
   314   // false so that we don't re-attempt the heap expansion (it's likely
   315   // that subsequent expansion attempts will also fail if one fails).
   316   // Currently, it is only consulted during GC and it's reset at the
   317   // start of each GC.
   318   bool _expand_heap_after_alloc_failure;
   320   // It resets the mutator alloc region before new allocations can take place.
   321   void init_mutator_alloc_region();
   323   // It releases the mutator alloc region.
   324   void release_mutator_alloc_region();
   326   // It initializes the GC alloc regions at the start of a GC.
   327   void init_gc_alloc_regions();
   329   // It releases the GC alloc regions at the end of a GC.
   330   void release_gc_alloc_regions();
   332   // It does any cleanup that needs to be done on the GC alloc regions
   333   // before a Full GC.
   334   void abandon_gc_alloc_regions();
   336   // Helper for monitoring and management support.
   337   G1MonitoringSupport* _g1mm;
   339   // Determines PLAB size for a particular allocation purpose.
   340   size_t desired_plab_sz(GCAllocPurpose purpose);
   342   // Outside of GC pauses, the number of bytes used in all regions other
   343   // than the current allocation region.
   344   size_t _summary_bytes_used;
   346   // This is used for a quick test on whether a reference points into
   347   // the collection set or not. Basically, we have an array, with one
   348   // byte per region, and that byte denotes whether the corresponding
   349   // region is in the collection set or not. The entry corresponding
    350   // to the bottom of the heap, i.e., region 0, is pointed to by
   351   // _in_cset_fast_test_base.  The _in_cset_fast_test field has been
   352   // biased so that it actually points to address 0 of the address
   353   // space, to make the test as fast as possible (we can simply shift
    354   // the address to index into it, instead of having to subtract the
   355   // bottom of the heap from the address before shifting it; basically
   356   // it works in the same way the card table works).
   357   bool* _in_cset_fast_test;
   359   // The allocated array used for the fast test on whether a reference
   360   // points into the collection set or not. This field is also used to
   361   // free the array.
   362   bool* _in_cset_fast_test_base;
   364   // The length of the _in_cset_fast_test_base array.
   365   uint _in_cset_fast_test_length;
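          // [Editor's sketch of the biasing described above; the actual
          //  set-up lives in the .cpp file, and the casts/helpers shown here
          //  are illustrative:
          //
          //    _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, length, mtGC);
          //    _in_cset_fast_test = _in_cset_fast_test_base -
          //        ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
          //
          //  After this, _in_cset_fast_test[(uintx) addr >> LogOfHRGrainBytes]
          //  is the entry for the region containing addr, with no subtraction
          //  of the heap bottom needed on the hot path.]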
   367   volatile unsigned _gc_time_stamp;
   369   size_t* _surviving_young_words;
   371   G1HRPrinter _hr_printer;
   373   void setup_surviving_young_words();
   374   void update_surviving_young_words(size_t* surv_young_words);
   375   void cleanup_surviving_young_words();
   377   // It decides whether an explicit GC should start a concurrent cycle
   378   // instead of doing a STW GC. Currently, a concurrent cycle is
   379   // explicitly started if:
   380   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
   381   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
   382   // (c) cause == _g1_humongous_allocation
   383   bool should_do_concurrent_full_gc(GCCause::Cause cause);
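          // [Editor's sketch of the predicate above; the real implementation
          //  is in the .cpp file:
          //
          //    return (cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          //           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
          //           (cause == GCCause::_g1_humongous_allocation);
          //  ]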
   385   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   386   // concurrent cycles) we have started.
   387   volatile unsigned int _old_marking_cycles_started;
   389   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   390   // concurrent cycles) we have completed.
   391   volatile unsigned int _old_marking_cycles_completed;
   393   // This is a non-product method that is helpful for testing. It is
   394   // called at the end of a GC and artificially expands the heap by
   395   // allocating a number of dead regions. This way we can induce very
   396   // frequent marking cycles and stress the cleanup / concurrent
   397   // cleanup code more (as all the regions that will be allocated by
   398   // this method will be found dead by the marking cycle).
   399   void allocate_dummy_regions() PRODUCT_RETURN;
   401   // Clear RSets after a compaction. It also resets the GC time stamps.
   402   void clear_rsets_post_compaction();
   404   // If the HR printer is active, dump the state of the regions in the
   405   // heap after a compaction.
   406   void print_hrs_post_compaction();
   408   double verify(bool guard, const char* msg);
   409   void verify_before_gc();
   410   void verify_after_gc();
   412   // These are macros so that, if the assert fires, we get the correct
   413   // line number, file, etc.
   415 #define heap_locking_asserts_err_msg(_extra_message_)                         \
   416   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
   417           (_extra_message_),                                                  \
   418           BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
   419           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
   420           BOOL_TO_STR(Thread::current()->is_VM_thread()))
   422 #define assert_heap_locked()                                                  \
   423   do {                                                                        \
   424     assert(Heap_lock->owned_by_self(),                                        \
   425            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
   426   } while (0)
   428 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
   429   do {                                                                        \
   430     assert(Heap_lock->owned_by_self() ||                                      \
   431            (SafepointSynchronize::is_at_safepoint() &&                        \
   432              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
   433            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
   434                                         "should be at a safepoint"));         \
   435   } while (0)
   437 #define assert_heap_locked_and_not_at_safepoint()                             \
   438   do {                                                                        \
   439     assert(Heap_lock->owned_by_self() &&                                      \
   440                                     !SafepointSynchronize::is_at_safepoint(), \
   441           heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
   442                                        "should not be at a safepoint"));      \
   443   } while (0)
   445 #define assert_heap_not_locked()                                              \
   446   do {                                                                        \
   447     assert(!Heap_lock->owned_by_self(),                                       \
   448         heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
   449   } while (0)
   451 #define assert_heap_not_locked_and_not_at_safepoint()                         \
   452   do {                                                                        \
   453     assert(!Heap_lock->owned_by_self() &&                                     \
   454                                     !SafepointSynchronize::is_at_safepoint(), \
   455       heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
   456                                    "should not be at a safepoint"));          \
   457   } while (0)
   459 #define assert_at_safepoint(_should_be_vm_thread_)                            \
   460   do {                                                                        \
   461     assert(SafepointSynchronize::is_at_safepoint() &&                         \
   462               ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
   463            heap_locking_asserts_err_msg("should be at a safepoint"));         \
   464   } while (0)
   466 #define assert_not_at_safepoint()                                             \
   467   do {                                                                        \
   468     assert(!SafepointSynchronize::is_at_safepoint(),                          \
   469            heap_locking_asserts_err_msg("should not be at a safepoint"));     \
   470   } while (0)
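          // [Editor's note -- usage sketch: a mutator allocation path would
          //  typically begin with
          //      assert_heap_not_locked_and_not_at_safepoint();
          //  while a VM-operation callback would begin with
          //      assert_at_safepoint(true /* should_be_vm_thread */);
          //  so a misplaced call fails with the Heap_lock / safepoint state
          //  printed by heap_locking_asserts_err_msg().]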
   472 protected:
   474   // The young region list.
   475   YoungList*  _young_list;
   477   // The current policy object for the collector.
   478   G1CollectorPolicy* _g1_policy;
   480   // This is the second level of trying to allocate a new region. If
   481   // new_region() didn't find a region on the free_list, this call will
   482   // check whether there's anything available on the
   483   // secondary_free_list and/or wait for more regions to appear on
   484   // that list, if _free_regions_coming is set.
   485   HeapRegion* new_region_try_secondary_free_list();
   487   // Try to allocate a single non-humongous HeapRegion sufficient for
   488   // an allocation of the given word_size. If do_expand is true,
   489   // attempt to expand the heap if necessary to satisfy the allocation
   490   // request.
   491   HeapRegion* new_region(size_t word_size, bool do_expand);
   493   // Attempt to satisfy a humongous allocation request of the given
   494   // size by finding a contiguous set of free regions of num_regions
   495   // length and remove them from the master free list. Return the
   496   // index of the first region or G1_NULL_HRS_INDEX if the search
   497   // was unsuccessful.
   498   uint humongous_obj_allocate_find_first(uint num_regions,
   499                                          size_t word_size);
   501   // Initialize a contiguous set of free regions of length num_regions
   502   // and starting at index first so that they appear as a single
   503   // humongous region.
   504   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
   505                                                       uint num_regions,
   506                                                       size_t word_size);
   508   // Attempt to allocate a humongous object of the given size. Return
   509   // NULL if unsuccessful.
   510   HeapWord* humongous_obj_allocate(size_t word_size);
   512   // The following two methods, allocate_new_tlab() and
   513   // mem_allocate(), are the two main entry points from the runtime
   514   // into the G1's allocation routines. They have the following
   515   // assumptions:
   516   //
   517   // * They should both be called outside safepoints.
   518   //
   519   // * They should both be called without holding the Heap_lock.
   520   //
   521   // * All allocation requests for new TLABs should go to
   522   //   allocate_new_tlab().
   523   //
   524   // * All non-TLAB allocation requests should go to mem_allocate().
   525   //
   526   // * If either call cannot satisfy the allocation request using the
   527   //   current allocating region, they will try to get a new one. If
   528   //   this fails, they will attempt to do an evacuation pause and
   529   //   retry the allocation.
   530   //
   531   // * If all allocation attempts fail, even after trying to schedule
   532   //   an evacuation pause, allocate_new_tlab() will return NULL,
   533   //   whereas mem_allocate() will attempt a heap expansion and/or
   534   //   schedule a Full GC.
   535   //
   536   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
   537   //   should never be called with word_size being humongous. All
   538   //   humongous allocation requests should go to mem_allocate() which
   539   //   will satisfy them with a special path.
   541   virtual HeapWord* allocate_new_tlab(size_t word_size);
   543   virtual HeapWord* mem_allocate(size_t word_size,
   544                                  bool*  gc_overhead_limit_was_exceeded);
   546   // The following three methods take a gc_count_before_ret
   547   // parameter which is used to return the GC count if the method
   548   // returns NULL. Given that we are required to read the GC count
   549   // while holding the Heap_lock, and these paths will take the
   550   // Heap_lock at some point, it's easier to get them to read the GC
   551   // count while holding the Heap_lock before they return NULL instead
   552   // of the caller (namely: mem_allocate()) having to also take the
   553   // Heap_lock just to read the GC count.
   555   // First-level mutator allocation attempt: try to allocate out of
   556   // the mutator alloc region without taking the Heap_lock. This
   557   // should only be used for non-humongous allocations.
   558   inline HeapWord* attempt_allocation(size_t word_size,
   559                                       unsigned int* gc_count_before_ret);
   561   // Second-level mutator allocation attempt: take the Heap_lock and
   562   // retry the allocation attempt, potentially scheduling a GC
   563   // pause. This should only be used for non-humongous allocations.
   564   HeapWord* attempt_allocation_slow(size_t word_size,
   565                                     unsigned int* gc_count_before_ret);
   567   // Takes the Heap_lock and attempts a humongous allocation. It can
   568   // potentially schedule a GC pause.
   569   HeapWord* attempt_allocation_humongous(size_t word_size,
   570                                          unsigned int* gc_count_before_ret);
   572   // Allocation attempt that should be called during safepoints (e.g.,
   573   // at the end of a successful GC). expect_null_mutator_alloc_region
   574   // specifies whether the mutator alloc region is expected to be NULL
   575   // or not.
   576   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
   577                                        bool expect_null_mutator_alloc_region);
    579   // It dirties the cards that cover the block so that the post
   580   // write barrier never queues anything when updating objects on this
   581   // block. It is assumed (and in fact we assert) that the block
   582   // belongs to a young region.
   583   inline void dirty_young_block(HeapWord* start, size_t word_size);
   585   // Allocate blocks during garbage collection. Will ensure an
   586   // allocation region, either by picking one or expanding the
   587   // heap, and then allocate a block of the given size. The block
    588   // may not be humongous - it must fit into a single heap region.
   589   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
   591   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
   592                                     HeapRegion*    alloc_region,
   593                                     bool           par,
   594                                     size_t         word_size);
   596   // Ensure that no further allocations can happen in "r", bearing in mind
   597   // that parallel threads might be attempting allocations.
   598   void par_allocate_remaining_space(HeapRegion* r);
   600   // Allocation attempt during GC for a survivor object / PLAB.
   601   inline HeapWord* survivor_attempt_allocation(size_t word_size);
   603   // Allocation attempt during GC for an old object / PLAB.
   604   inline HeapWord* old_attempt_allocation(size_t word_size);
   606   // These methods are the "callbacks" from the G1AllocRegion class.
   608   // For mutator alloc regions.
   609   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   610   void retire_mutator_alloc_region(HeapRegion* alloc_region,
   611                                    size_t allocated_bytes);
   613   // For GC alloc regions.
   614   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
   615                                   GCAllocPurpose ap);
   616   void retire_gc_alloc_region(HeapRegion* alloc_region,
   617                               size_t allocated_bytes, GCAllocPurpose ap);
   619   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   620   //   inspection request and should collect the entire heap
   621   // - if clear_all_soft_refs is true, all soft references should be
   622   //   cleared during the GC
   623   // - if explicit_gc is false, word_size describes the allocation that
   624   //   the GC should attempt (at least) to satisfy
   625   // - it returns false if it is unable to do the collection due to the
   626   //   GC locker being active, true otherwise
   627   bool do_collection(bool explicit_gc,
   628                      bool clear_all_soft_refs,
   629                      size_t word_size);
   631   // Callback from VM_G1CollectFull operation.
   632   // Perform a full collection.
   633   void do_full_collection(bool clear_all_soft_refs);
   635   // Resize the heap if necessary after a full collection.  If this is
   636   // after a collect-for allocation, "word_size" is the allocation size,
   637   // and will be considered part of the used portion of the heap.
   638   void resize_if_necessary_after_full_collection(size_t word_size);
   640   // Callback from VM_G1CollectForAllocation operation.
   641   // This function does everything necessary/possible to satisfy a
   642   // failed allocation request (including collection, expansion, etc.)
   643   HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
    645   // Attempt to expand the heap sufficiently
   646   // to support an allocation of the given "word_size".  If
   647   // successful, perform the allocation and return the address of the
   648   // allocated block, or else "NULL".
   649   HeapWord* expand_and_allocate(size_t word_size);
   651   // Process any reference objects discovered during
   652   // an incremental evacuation pause.
   653   void process_discovered_references();
   655   // Enqueue any remaining discovered references
   656   // after processing.
   657   void enqueue_discovered_references();
   659 public:
   661   G1MonitoringSupport* g1mm() {
   662     assert(_g1mm != NULL, "should have been initialized");
   663     return _g1mm;
   664   }
   666   // Expand the garbage-first heap by at least the given size (in bytes!).
   667   // Returns true if the heap was expanded by the requested amount;
   668   // false otherwise.
   669   // (Rounds up to a HeapRegion boundary.)
   670   bool expand(size_t expand_bytes);
   672   // Do anything common to GC's.
   673   virtual void gc_prologue(bool full);
   674   virtual void gc_epilogue(bool full);
   676   // We register a region with the fast "in collection set" test. We
   677   // simply set to true the array slot corresponding to this region.
   678   void register_region_with_in_cset_fast_test(HeapRegion* r) {
   679     assert(_in_cset_fast_test_base != NULL, "sanity");
   680     assert(r->in_collection_set(), "invariant");
   681     uint index = r->hrs_index();
   682     assert(index < _in_cset_fast_test_length, "invariant");
   683     assert(!_in_cset_fast_test_base[index], "invariant");
   684     _in_cset_fast_test_base[index] = true;
   685   }
   687   // This is a fast test on whether a reference points into the
   688   // collection set or not. It does not assume that the reference
   689   // points into the heap; if it doesn't, it will return false.
   690   bool in_cset_fast_test(oop obj) {
   691     assert(_in_cset_fast_test != NULL, "sanity");
   692     if (_g1_committed.contains((HeapWord*) obj)) {
   693       // no need to subtract the bottom of the heap from obj,
   694       // _in_cset_fast_test is biased
   695       uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
   696       bool ret = _in_cset_fast_test[index];
   697       // let's make sure the result is consistent with what the slower
   698       // test returns
   699       assert( ret || !obj_in_cs(obj), "sanity");
   700       assert(!ret ||  obj_in_cs(obj), "sanity");
   701       return ret;
   702     } else {
   703       return false;
   704     }
   705   }
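          // [Editor's note -- worked example: with 1 MB regions
          //  (LogOfHRGrainBytes == 20) and the heap reserved at 0x80000000,
          //  an oop at 0x80250000 gives index 0x80250000 >> 20 == 0x802;
          //  because of the biasing of _in_cset_fast_test this is the entry
          //  for heap region 2 (0x802 - 0x800).]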
   707   void clear_cset_fast_test() {
   708     assert(_in_cset_fast_test_base != NULL, "sanity");
   709     memset(_in_cset_fast_test_base, false,
   710            (size_t) _in_cset_fast_test_length * sizeof(bool));
   711   }
   713   // This is called at the start of either a concurrent cycle or a Full
   714   // GC to update the number of old marking cycles started.
   715   void increment_old_marking_cycles_started();
   717   // This is called at the end of either a concurrent cycle or a Full
   718   // GC to update the number of old marking cycles completed. Those two
   719   // can happen in a nested fashion, i.e., we start a concurrent
   720   // cycle, a Full GC happens half-way through it which ends first,
   721   // and then the cycle notices that a Full GC happened and ends
   722   // too. The concurrent parameter is a boolean to help us do a bit
   723   // tighter consistency checking in the method. If concurrent is
   724   // false, the caller is the inner caller in the nesting (i.e., the
   725   // Full GC). If concurrent is true, the caller is the outer caller
   726   // in this nesting (i.e., the concurrent cycle). Further nesting is
   727   // not currently supported. The end of this call also notifies
   728   // the FullGCCount_lock in case a Java thread is waiting for a full
   729   // GC to happen (e.g., it called System.gc() with
   730   // +ExplicitGCInvokesConcurrent).
   731   void increment_old_marking_cycles_completed(bool concurrent);
   733   unsigned int old_marking_cycles_completed() {
   734     return _old_marking_cycles_completed;
   735   }
   737   G1HRPrinter* hr_printer() { return &_hr_printer; }
   739 protected:
   741   // Shrink the garbage-first heap by at most the given size (in bytes!).
   742   // (Rounds down to a HeapRegion boundary.)
   743   virtual void shrink(size_t expand_bytes);
   744   void shrink_helper(size_t expand_bytes);
   746   #if TASKQUEUE_STATS
   747   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
   748   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
   749   void reset_taskqueue_stats();
   750   #endif // TASKQUEUE_STATS
   752   // Schedule the VM operation that will do an evacuation pause to
   753   // satisfy an allocation request of word_size. *succeeded will
   754   // return whether the VM operation was successful (it did do an
   755   // evacuation pause) or not (another thread beat us to it or the GC
   756   // locker was active). Given that we should not be holding the
   757   // Heap_lock when we enter this method, we will pass the
   758   // gc_count_before (i.e., total_collections()) as a parameter since
   759   // it has to be read while holding the Heap_lock. Currently, both
   760   // methods that call do_collection_pause() release the Heap_lock
   761   // before the call, so it's easy to read gc_count_before just before.
   762   HeapWord* do_collection_pause(size_t       word_size,
   763                                 unsigned int gc_count_before,
   764                                 bool*        succeeded);
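          // [Editor's sketch of the caller protocol described above;
          //  variable names are illustrative:
          //
          //    unsigned int gc_count_before;
          //    {
          //      MutexLockerEx x(Heap_lock);
          //      gc_count_before = total_collections();
          //    }
          //    // Heap_lock is released here, before the VM op is scheduled.
          //    bool succeeded;
          //    HeapWord* result =
          //      do_collection_pause(word_size, gc_count_before, &succeeded);
          //  ]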
   766   // The guts of the incremental collection pause, executed by the vm
   767   // thread. It returns false if it is unable to do the collection due
   768   // to the GC locker being active, true otherwise
   769   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
   771   // Actually do the work of evacuating the collection set.
   772   void evacuate_collection_set();
   774   // The g1 remembered set of the heap.
   775   G1RemSet* _g1_rem_set;
    776   // And its mod ref barrier set, used to track updates for the above.
   777   ModRefBarrierSet* _mr_bs;
   779   // A set of cards that cover the objects for which the Rsets should be updated
   780   // concurrently after the collection.
   781   DirtyCardQueueSet _dirty_card_queue_set;
   783   // The Heap Region Rem Set Iterator.
   784   HeapRegionRemSetIterator** _rem_set_iterator;
   786   // The closure used to refine a single card.
   787   RefineCardTableEntryClosure* _refine_cte_cl;
   789   // A function to check the consistency of dirty card logs.
   790   void check_ct_logs_at_safepoint();
   792   // A DirtyCardQueueSet that is used to hold cards that contain
   793   // references into the current collection set. This is used to
   794   // update the remembered sets of the regions in the collection
   795   // set in the event of an evacuation failure.
   796   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
   798   // After a collection pause, make the regions in the CS into free
   799   // regions.
   800   void free_collection_set(HeapRegion* cs_head);
   802   // Abandon the current collection set without recording policy
   803   // statistics or updating free lists.
   804   void abandon_collection_set(HeapRegion* cs_head);
   806   // Applies "scan_non_heap_roots" to roots outside the heap,
   807   // "scan_rs" to roots inside the heap (having done "set_region" to
   808   // indicate the region in which the root resides), and does "scan_perm"
   809   // (setting the generation to the perm generation.)  If "scan_rs" is
   810   // NULL, then this step is skipped.  The "worker_i"
   811   // param is for use with parallel roots processing, and should be
   812   // the "i" of the calling parallel worker thread's work(i) function.
   813   // In the sequential case this param will be ignored.
   814   void g1_process_strong_roots(bool collecting_perm_gen,
   815                                ScanningOption so,
   816                                OopClosure* scan_non_heap_roots,
   817                                OopsInHeapRegionClosure* scan_rs,
   818                                OopsInGenClosure* scan_perm,
   819                                int worker_i);
   821   // Apply "blk" to all the weak roots of the system.  These include
   822   // JNI weak roots, the code cache, system dictionary, symbol table,
   823   // string table, and referents of reachable weak refs.
   824   void g1_process_weak_roots(OopClosure* root_closure,
   825                              OopClosure* non_root_closure);
   827   // Frees a non-humongous region by initializing its contents and
   828   // adding it to the free list that's passed as a parameter (this is
   829   // usually a local list which will be appended to the master free
   830   // list later). The used bytes of freed regions are accumulated in
   831   // pre_used. If par is true, the region's RSet will not be freed
   832   // up. The assumption is that this will be done later.
   833   void free_region(HeapRegion* hr,
   834                    size_t* pre_used,
   835                    FreeRegionList* free_list,
   836                    bool par);
   838   // Frees a humongous region by collapsing it into individual regions
   839   // and calling free_region() for each of them. The freed regions
   840   // will be added to the free list that's passed as a parameter (this
   841   // is usually a local list which will be appended to the master free
   842   // list later). The used bytes of freed regions are accumulated in
   843   // pre_used. If par is true, the region's RSet will not be freed
   844   // up. The assumption is that this will be done later.
   845   void free_humongous_region(HeapRegion* hr,
   846                              size_t* pre_used,
   847                              FreeRegionList* free_list,
   848                              HumongousRegionSet* humongous_proxy_set,
   849                              bool par);
   851   // Notifies all the necessary spaces that the committed space has
   852   // been updated (either expanded or shrunk). It should be called
   853   // after _g1_storage is updated.
   854   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
   856   // The concurrent marker (and the thread it runs in.)
   857   ConcurrentMark* _cm;
   858   ConcurrentMarkThread* _cmThread;
   859   bool _mark_in_progress;
   861   // The concurrent refiner.
   862   ConcurrentG1Refine* _cg1r;
   864   // The parallel task queues
   865   RefToScanQueueSet *_task_queues;
    867   // True iff an evacuation has failed in the current collection.
   868   bool _evacuation_failed;
   870   // Set the attribute indicating whether evacuation has failed in the
   871   // current collection.
   872   void set_evacuation_failed(bool b) { _evacuation_failed = b; }
   874   // Failed evacuations cause some logical from-space objects to have
   875   // forwarding pointers to themselves.  Reset them.
   876   void remove_self_forwarding_pointers();
    878   // When one is non-null, so is the other.  Together, each pair is
   879   // an object with a preserved mark, and its mark value.
   880   GrowableArray<oop>*     _objs_with_preserved_marks;
   881   GrowableArray<markOop>* _preserved_marks_of_objs;
   883   // Preserve the mark of "obj", if necessary, in preparation for its mark
   884   // word being overwritten with a self-forwarding-pointer.
   885   void preserve_mark_if_necessary(oop obj, markOop m);
   887   // The stack of evac-failure objects left to be scanned.
   888   GrowableArray<oop>*    _evac_failure_scan_stack;
   889   // The closure to apply to evac-failure objects.
   891   OopsInHeapRegionClosure* _evac_failure_closure;
   892   // Set the field above.
   893   void
   894   set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
   895     _evac_failure_closure = evac_failure_closure;
   896   }
   898   // Push "obj" on the scan stack.
   899   void push_on_evac_failure_scan_stack(oop obj);
   900   // Process scan stack entries until the stack is empty.
   901   void drain_evac_failure_scan_stack();
   902   // True iff an invocation of "drain_scan_stack" is in progress; to
   903   // prevent unnecessary recursion.
   904   bool _drain_in_progress;
   906   // Do any necessary initialization for evacuation-failure handling.
   907   // "cl" is the closure that will be used to process evac-failure
   908   // objects.
   909   void init_for_evac_failure(OopsInHeapRegionClosure* cl);
   910   // Do any necessary cleanup for evacuation-failure handling data
   911   // structures.
   912   void finalize_for_evac_failure();
   914   // An attempt to evacuate "obj" has failed; take necessary steps.
   915   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   916   void handle_evacuation_failure_common(oop obj, markOop m);
   918 #ifndef PRODUCT
   919   // Support for forcing evacuation failures. Analogous to
   920   // PromotionFailureALot for the other collectors.
   922   // Records whether G1EvacuationFailureALot should be in effect
   923   // for the current GC
   924   bool _evacuation_failure_alot_for_current_gc;
   926   // Used to record the GC number for interval checking when
    927   // determining whether G1EvacuationFailureALot is in effect
   928   // for the current GC.
   929   size_t _evacuation_failure_alot_gc_number;
   931   // Count of the number of evacuations between failures.
   932   volatile size_t _evacuation_failure_alot_count;
   934   // Set whether G1EvacuationFailureALot should be in effect
   935   // for the current GC (based upon the type of GC and which
    936   // command line flags are set).
   937   inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
   938                                                   bool during_initial_mark,
   939                                                   bool during_marking);
   941   inline void set_evacuation_failure_alot_for_current_gc();
   943   // Return true if it's time to cause an evacuation failure.
   944   inline bool evacuation_should_fail();
   946   // Reset the G1EvacuationFailureALot counters.  Should be called at
    947   // the end of an evacuation pause in which an evacuation failure occurred.
   948   inline void reset_evacuation_should_fail();
   949 #endif // !PRODUCT
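          // [Editor's sketch of how the hooks above are meant to be used on
          //  the evacuation copy path (the actual call sites are in the
          //  .cpp/.inline.hpp files; the local names are illustrative):
          //
          //    #ifndef PRODUCT
          //      if (_g1h->evacuation_should_fail()) {
          //        return _g1h->handle_evacuation_failure_par(cl, old_obj);
          //      }
          //    #endif // !PRODUCT
          //  ]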
   951   // ("Weak") Reference processing support.
   952   //
    953   // G1 has 2 instances of the reference processor class. One
   954   // (_ref_processor_cm) handles reference object discovery
   955   // and subsequent processing during concurrent marking cycles.
   956   //
   957   // The other (_ref_processor_stw) handles reference object
   958   // discovery and processing during full GCs and incremental
   959   // evacuation pauses.
   960   //
   961   // During an incremental pause, reference discovery will be
   962   // temporarily disabled for _ref_processor_cm and will be
   963   // enabled for _ref_processor_stw. At the end of the evacuation
   964   // pause references discovered by _ref_processor_stw will be
   965   // processed and discovery will be disabled. The previous
   966   // setting for reference object discovery for _ref_processor_cm
   967   // will be re-instated.
   968   //
   969   // At the start of marking:
   970   //  * Discovery by the CM ref processor is verified to be inactive
    971   //    and its discovered lists are empty.
   972   //  * Discovery by the CM ref processor is then enabled.
   973   //
   974   // At the end of marking:
   975   //  * Any references on the CM ref processor's discovered
   976   //    lists are processed (possibly MT).
   977   //
   978   // At the start of full GC we:
   979   //  * Disable discovery by the CM ref processor and
   980   //    empty CM ref processor's discovered lists
   981   //    (without processing any entries).
    982   //  * Verify that the STW ref processor is inactive and its
   983   //    discovered lists are empty.
   984   //  * Temporarily set STW ref processor discovery as single threaded.
   985   //  * Temporarily clear the STW ref processor's _is_alive_non_header
   986   //    field.
   987   //  * Finally enable discovery by the STW ref processor.
   988   //
   989   // The STW ref processor is used to record any discovered
   990   // references during the full GC.
   991   //
   992   // At the end of a full GC we:
   993   //  * Enqueue any reference objects discovered by the STW ref processor
   994   //    that have non-live referents. This has the side-effect of
   995   //    making the STW ref processor inactive by disabling discovery.
   996   //  * Verify that the CM ref processor is still inactive
    997   //    and no references have been placed on its discovered
   998   //    lists (also checked as a precondition during initial marking).
  1000   // The (stw) reference processor...
  1001   ReferenceProcessor* _ref_processor_stw;
  1003   // During reference object discovery, the _is_alive_non_header
  1004   // closure (if non-null) is applied to the referent object to
  1005   // determine whether the referent is live. If so then the
  1006   // reference object does not need to be 'discovered' and can
  1007   // be treated as a regular oop. This has the benefit of reducing
  1008   // the number of 'discovered' reference objects that need to
  1009   // be processed.
  1010   //
  1011   // Instance of the is_alive closure for embedding into the
  1012   // STW reference processor as the _is_alive_non_header field.
  1013   // Supplying a value for the _is_alive_non_header field is
  1014   // optional but doing so prevents unnecessary additions to
  1015   // the discovered lists during reference discovery.
  1016   G1STWIsAliveClosure _is_alive_closure_stw;
  1018   // The (concurrent marking) reference processor...
  1019   ReferenceProcessor* _ref_processor_cm;
  1021   // Instance of the concurrent mark is_alive closure for embedding
  1022   // into the Concurrent Marking reference processor as the
  1023   // _is_alive_non_header field. Supplying a value for the
  1024   // _is_alive_non_header field is optional but doing so prevents
  1025   // unnecessary additions to the discovered lists during reference
  1026   // discovery.
  1027   G1CMIsAliveClosure _is_alive_closure_cm;
  1029   // Cache used by G1CollectedHeap::start_cset_region_for_worker().
  1030   HeapRegion** _worker_cset_start_region;
  1032   // Time stamp to validate the regions recorded in the cache
  1033   // used by G1CollectedHeap::start_cset_region_for_worker().
  1034   // The heap region entry for a given worker is valid iff
  1035   // the associated time stamp value matches the current value
  1036   // of G1CollectedHeap::_gc_time_stamp.
  1037   unsigned int* _worker_cset_start_region_time_stamp;
  1039   enum G1H_process_strong_roots_tasks {
  1040     G1H_PS_filter_satb_buffers,
  1041     G1H_PS_refProcessor_oops_do,
  1042     // Leave this one last.
  1043     G1H_PS_NumElements
  1044   };
  1046   SubTasksDone* _process_strong_tasks;
  1048   volatile bool _free_regions_coming;
  1050 public:
  1052   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
  1054   void set_refine_cte_cl_concurrency(bool concurrent);
  1056   RefToScanQueue *task_queue(int i) const;
  1058   // A set of cards where updates happened during the GC
  1059   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
  1061   // A DirtyCardQueueSet that is used to hold cards that contain
  1062   // references into the current collection set. This is used to
  1063   // update the remembered sets of the regions in the collection
  1064   // set in the event of an evacuation failure.
  1065   DirtyCardQueueSet& into_cset_dirty_card_queue_set()
  1066         { return _into_cset_dirty_card_queue_set; }
  1068   // Create a G1CollectedHeap with the specified policy.
  1069   // Must call the initialize method afterwards.
  1070   // May not return if something goes wrong.
  1071   G1CollectedHeap(G1CollectorPolicy* policy);
  1073   // Initialize the G1CollectedHeap to have the initial and
  1074   // maximum sizes, permanent generation, and remembered and barrier sets
  1075   // specified by the policy object.
  1076   jint initialize();
  1078   // Initialize weak reference processing.
  1079   virtual void ref_processing_init();
  1081   void set_par_threads(uint t) {
  1082     SharedHeap::set_par_threads(t);
  1083     // Done in SharedHeap but oddly there are
  1084     // two _process_strong_tasks's in a G1CollectedHeap
  1085     // so do it here too.
   1086     _process_strong_tasks->set_n_threads(t);
   1087   }
  1089   // Set _n_par_threads according to a policy TBD.
  1090   void set_par_threads();
  1092   void set_n_termination(int t) {
   1093     _process_strong_tasks->set_n_threads(t);
   1094   }
  1096   virtual CollectedHeap::Name kind() const {
   1097     return CollectedHeap::G1CollectedHeap;
   1098   }
  1100   // The current policy object for the collector.
  1101   G1CollectorPolicy* g1_policy() const { return _g1_policy; }
  1103   // Adaptive size policy.  No such thing for g1.
  1104   virtual AdaptiveSizePolicy* size_policy() { return NULL; }
  1106   // The rem set and barrier set.
  1107   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  1108   ModRefBarrierSet* mr_bs() const { return _mr_bs; }
  1110   // The rem set iterator.
  1111   HeapRegionRemSetIterator* rem_set_iterator(int i) {
   1112     return _rem_set_iterator[i];
   1113   }
  1115   HeapRegionRemSetIterator* rem_set_iterator() {
   1116     return _rem_set_iterator[0];
   1117   }
  1119   unsigned get_gc_time_stamp() {
   1120     return _gc_time_stamp;
   1121   }
  1123   void reset_gc_time_stamp() {
  1124     _gc_time_stamp = 0;
  1125     OrderAccess::fence();
  1126     // Clear the cached CSet starting regions and time stamps.
  1127     // Their validity is dependent on the GC timestamp.
   1128     clear_cset_start_regions();
   1129   }
  1131   void check_gc_time_stamps() PRODUCT_RETURN;
  1133   void increment_gc_time_stamp() {
  1134     ++_gc_time_stamp;
   1135     OrderAccess::fence();
   1136   }
   1138   // Reset the given region's GC timestamp. If the region is a "starts
   1139   // humongous" region, also reset the GC timestamps of its corresponding
   1140   // "continues humongous" regions too.
  1141   void reset_gc_time_stamps(HeapRegion* hr);
  1143   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
  1144                                   DirtyCardQueue* into_cset_dcq,
  1145                                   bool concurrent, int worker_i);
  1147   // The shared block offset table array.
  1148   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
  1150   // Reference Processing accessors
  1152   // The STW reference processor....
  1153   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
   1155   // The Concurrent Marking reference processor...
  1156   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
  1158   virtual size_t capacity() const;
  1159   virtual size_t used() const;
  1160   // This should be called when we're not holding the heap lock. The
  1161   // result might be a bit inaccurate.
  1162   size_t used_unlocked() const;
  1163   size_t recalculate_used() const;
  1165   // These virtual functions do the actual allocation.
  1166   // Some heaps may offer a contiguous region for shared non-blocking
  1167   // allocation, via inlined code (by exporting the address of the top and
  1168   // end fields defining the extent of the contiguous allocation region.)
  1169   // But G1CollectedHeap doesn't yet support this.
  1171   // Return an estimate of the maximum allocation that could be performed
  1172   // without triggering any collection or expansion activity.  In a
  1173   // generational collector, for example, this is probably the largest
  1174   // allocation that could be supported (without expansion) in the youngest
  1175   // generation.  It is "unsafe" because no locks are taken; the result
  1176   // should be treated as an approximation, not a guarantee, for use in
  1177   // heuristic resizing decisions.
  1178   virtual size_t unsafe_max_alloc();
  1180   virtual bool is_maximal_no_gc() const {
   1181     return _g1_storage.uncommitted_size() == 0;
   1182   }
  1184   // The total number of regions in the heap.
  1185   uint n_regions() { return _hrs.length(); }
  1187   // The max number of regions in the heap.
  1188   uint max_regions() { return _hrs.max_length(); }
  1190   // The number of regions that are completely free.
  1191   uint free_regions() { return _free_list.length(); }
  1193   // The number of regions that are not completely free.
  1194   uint used_regions() { return n_regions() - free_regions(); }
  1196   // The number of regions available for "regular" expansion.
  1197   uint expansion_regions() { return _expansion_regions; }
  1199   // Factory method for HeapRegion instances. It will return NULL if
  1200   // the allocation fails.
  1201   HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
  1203   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  1204   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  1205   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
  1206   void verify_dirty_young_regions() PRODUCT_RETURN;
  1208   // verify_region_sets() performs verification over the region
  1209   // lists. It will be compiled in the product code to be used when
  1210   // necessary (i.e., during heap verification).
  1211   void verify_region_sets();
  1213   // verify_region_sets_optional() is planted in the code for
  1214   // list verification in non-product builds (and it can be enabled in
   1215   // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
  1216 #if HEAP_REGION_SET_FORCE_VERIFY
  1217   void verify_region_sets_optional() {
   1218     verify_region_sets();
   1219   }
  1220 #else // HEAP_REGION_SET_FORCE_VERIFY
  1221   void verify_region_sets_optional() { }
  1222 #endif // HEAP_REGION_SET_FORCE_VERIFY
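
  // For example (illustrative only): since the guard above is a plain
  // preprocessor test, a product build could force region set verification
  // by compiling with HEAP_REGION_SET_FORCE_VERIFY defined to 1, e.g. via
  // an extra -DHEAP_REGION_SET_FORCE_VERIFY=1 compiler flag, assuming the
  // macro is not already defined unconditionally elsewhere in the build.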

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return hr->containing_set() == &_free_list;
  }

  bool is_in_humongous_set(HeapRegion* hr) {
    return hr->containing_set() == &_humongous_set;
  }
#endif // ASSERT

  // Wrapper for the region list operations that can be called from
  // methods outside this class.

  void secondary_free_list_add_as_tail(FreeRegionList* list) {
    _secondary_free_list.add_as_tail(list);
  }

  void append_secondary_free_list() {
    _free_list.add_as_head(&_secondary_free_list);
  }

  void append_secondary_free_list_if_not_empty_with_lock() {
    // If the secondary free list looks empty there's no reason to
    // take the lock and then try to append it.
    if (!_secondary_free_list.is_empty()) {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      append_secondary_free_list();
    }
  }

  void old_set_remove(HeapRegion* hr) {
    _old_set.remove(hr);
  }

  size_t non_young_capacity_bytes() {
    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
  }

  void set_free_regions_coming();
  void reset_free_regions_coming();
  bool free_regions_coming() { return _free_regions_coming; }
  void wait_while_free_regions_coming();

  // Determine whether the given region is one that we are using as an
  // old GC alloc region.
  bool is_old_gc_alloc_region(HeapRegion* hr) {
    return hr == _retained_old_gc_alloc_region;
  }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // True iff an evacuation has failed in the most-recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  // Frees a region if all the objects allocated in it are dead. It
  // calls either free_region() or free_humongous_region() depending
  // on the type of the region that is passed to it.
  void free_region_if_empty(HeapRegion* hr,
                            size_t* pre_used,
                            FreeRegionList* free_list,
                            OldRegionSet* old_proxy_set,
                            HumongousRegionSet* humongous_proxy_set,
                            HRRSCleanupTask* hrrs_cleanup_task,
                            bool par);

  // Appends the free list to the master free list and updates the
  // master humongous list according to the contents of the proxy
  // list. It also adjusts the total used bytes according to pre_used
  // (if par is true, it will do so by taking the ParGCRareEvent_lock).
  void update_sets_after_freeing_regions(size_t pre_used,
                                         FreeRegionList* free_list,
                                         OldRegionSet* old_proxy_set,
                                         HumongousRegionSet* humongous_proxy_set,
                                         bool par);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set.
  inline bool obj_in_cs(oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1 (excluding the permanent generation).
  bool is_in_g1_reserved(const void* p) const {
    return _g1_reserved.contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap.
  MemRegion g1_reserved() {
    return _g1_reserved;
  }

  // Returns a MemRegion that corresponds to the space that has been
  // committed in the heap.
  MemRegion g1_committed() {
    return _g1_committed;
  }

  virtual bool is_in_closed_subset(const void* p) const;

  // This resets the card table to all zeros.  It is used after
  // a collection pause which used the card table to claim cards.
  void cleanUpCardTable();

  // Iteration functions.

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl) {
    oop_iterate(cl, true);
  }
  void oop_iterate(OopClosure* cl, bool do_perm);

  // Same as above, restricted to a memory region.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
    oop_iterate(mr, cl, true);
  }
  void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  virtual void safe_object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  void object_iterate(ObjectClosure* cl, bool do_perm);

  // Iterate over all objects allocated since the last collection, calling
  // "cl.do_object" on each.  The heap must have been initialized properly
  // to support this function, or else this call will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iterate over all spaces in use in the heap, in ascending address order.
  virtual void space_iterate(SpaceClosure* cl);

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "doHeapRegion" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk) const;
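
  // A minimal sketch (illustrative, not part of this class) of the
  // closure contract used above: a hypothetical closure that counts
  // completely free regions and never aborts the traversal.
  //
  //   class CountFreeRegionsClosure : public HeapRegionClosure {
  //     uint _count;
  //   public:
  //     CountFreeRegionsClosure() : _count(0) { }
  //     bool doHeapRegion(HeapRegion* hr) {
  //       if (hr->is_empty()) {
  //         _count += 1;
  //       }
  //       return false; // returning true would terminate the iteration
  //     }
  //     uint count() const { return _count; }
  //   };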

  // Return the region with the given index. It assumes the index is valid.
  HeapRegion* region_at(uint index) const { return _hrs.at(index); }

  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4).  Assumes that this will be called
  // in parallel by ParallelGCThreads worker threads with distinct worker
  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
  // calls will use the same "claim_value", and that that claim value is
  // different from the claim_value of any heap region before the start of
  // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
  // attempting to claim the first region in each chunk, and, if
  // successful, applying the closure to each region in the chunk (and
  // setting the claim value of the second and subsequent regions of the
  // chunk.)  For now requires that "doHeapRegion" always returns "false",
  // i.e., that a closure never attempts to abort a traversal.
  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                       uint worker,
                                       uint no_of_par_workers,
                                       jint claim_value);
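
  // Worked example of the chunking arithmetic (illustrative numbers only):
  // with 2048 regions and 8 workers, the overpartition factor of 4 gives
  // chunks of 2048 / (8 * 4) = 64 regions. Each worker therefore competes
  // to claim several 64-region chunks rather than being handed one fixed
  // 256-region slice, which smooths out load imbalance when some regions
  // are more expensive to process than others.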

  // It resets all the region claim values to the default.
  void reset_heap_region_claim_values();

  // Resets the claim values of regions in the current
  // collection set to the default.
  void reset_cset_heap_region_claim_values();

#ifdef ASSERT
  bool check_heap_region_claim_values(jint claim_value);

  // Same as the routine above but only checks regions in the
  // current collection set.
  bool check_cset_heap_region_claim_values(jint claim_value);
#endif // ASSERT

  // Clear the cached cset start regions and (more importantly)
  // the time stamps. Called when we reset the GC time stamp.
  void clear_cset_start_regions();

  // Given the id of a worker, obtain or calculate a suitable
  // starting region for iterating over the current collection set.
  HeapRegion* start_cset_region_for_worker(int worker_i);

  // This is a convenience method that is used by the
  // HeapRegionIterator classes to calculate the starting region for
  // each worker so that they do not all start from the same region.
  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);

  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);

  // As above but starting from region r.
  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure* blk);

  // Returns the first (lowest address) compactible space in the heap.
  virtual CompactibleSpace* first_compactible_space();

  // A CollectedHeap will contain some number of spaces.  This finds the
  // space containing a given address, or else returns NULL.
  virtual Space* space_containing(const void* addr) const;

  // A G1CollectedHeap will contain some number of heap regions.  This
  // finds the region containing a given address, or else returns NULL.
  template <class T>
  inline HeapRegion* heap_region_containing(const T addr) const;

  // Like the above, but requires "addr" to be in the heap (to avoid a
  // null-check), and unlike the above, may return a continuing humongous
  // region.
  template <class T>
  inline HeapRegion* heap_region_containing_raw(const T addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
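
  // Together these three methods support linear heap parsing. A sketch
  // (illustrative only) of walking the blocks of a memory region "mr":
  //
  //   HeapWord* p = block_start(mr.start());
  //   while (p < mr.end()) {
  //     if (block_is_obj(p)) {
  //       // process oop(p)
  //     }
  //     p += block_size(p); // advance to the start of the next block
  //   }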

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided. G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region. The efficiency
  // of this implementation depends crucially on being able to
  // answer, in constant time, whether a piece of storage in the
  // heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return true;
  }

  bool is_in_young(const oop obj) {
    HeapRegion* hr = heap_region_containing(obj);
    return hr != NULL && hr->is_young();
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr);

  // We don't need barriers for initializing stores to objects
  // in the young gen: for the SATB pre-barrier, there is no
  // pre-value that needs to be remembered; for the remembered-set
  // update logging post-barrier, we don't maintain remembered set
  // information for young gen objects.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // At least until perm gen collection is also G1-ified, at
    // which point this should return false.
    return true;
  }

  // Returns "true" iff the given word_size is "very large".
  static bool isHumongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }
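
  // Worked example (illustrative, assuming the conventional G1 threshold
  // of half a region): with 1M regions on a 64-bit VM the threshold is
  // 64K words (512K bytes), so isHumongous(65536) is false while
  // isHumongous(65537) is true, and the largest TLAB request that is not
  // humongous is exactly the threshold itself.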
  1546   // Update mod union table with the set of dirty cards.
  1547   void updateModUnion();
  1549   // Set the mod union bits corresponding to the given memRegion.  Note
  1550   // that this is always a safe operation, since it doesn't clear any
  1551   // bits.
  1552   void markModUnionRange(MemRegion mr);
  1554   // Records the fact that a marking phase is no longer in progress.
  1555   void set_marking_complete() {
  1556     _mark_in_progress = false;
  1558   void set_marking_started() {
  1559     _mark_in_progress = true;
  1561   bool mark_in_progress() {
  1562     return _mark_in_progress;
  1565   // Print the maximum heap capacity.
  1566   virtual size_t max_capacity() const;
  1568   virtual jlong millis_since_last_gc();
  1570   // Perform any cleanup actions necessary before allowing a verification.
  1571   virtual void prepare_for_verify();
  1573   // Perform verification.
  1575   // vo == UsePrevMarking  -> use "prev" marking information,
  1576   // vo == UseNextMarking -> use "next" marking information
  1577   // vo == UseMarkWord    -> use the mark word in the object header
  1578   //
  1579   // NOTE: Only the "prev" marking information is guaranteed to be
  1580   // consistent most of the time, so most calls to this should use
  1581   // vo == UsePrevMarking.
  1582   // Currently, there is only one case where this is called with
  1583   // vo == UseNextMarking, which is to verify the "next" marking
  1584   // information at the end of remark.
  1585   // Currently there is only one place where this is called with
  1586   // vo == UseMarkWord, which is to verify the marking during a
  1587   // full GC.
  1588   void verify(bool silent, VerifyOption vo);
  1590   // Override; it uses the "prev" marking information
  1591   virtual void verify(bool silent);
  1592   virtual void print_on(outputStream* st) const;
  1593   virtual void print_extended_on(outputStream* st) const;
  1595   virtual void print_gc_threads_on(outputStream* st) const;
  1596   virtual void gc_threads_do(ThreadClosure* tc) const;
  1598   // Override
  1599   void print_tracing_info() const;
  1601   // The following two methods are helpful for debugging RSet issues.
  1602   void print_cset_rsets() PRODUCT_RETURN;
  1603   void print_all_rsets() PRODUCT_RETURN;
  1605   // Convenience function to be used in situations where the heap type can be
  1606   // asserted to be this type.
  1607   static G1CollectedHeap* heap();
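
  // Typical usage (illustrative): code that knows it is running on G1
  // fetches the heap once and then calls G1-specific accessors, e.g.
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   uint regions = g1h->n_regions();
  //   uint free    = g1h->free_regions();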

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

  YoungList* young_list() { return _young_list; }

  // debugging
  bool check_young_list_well_formed() {
    return _young_list->check_list_well_formed();
  }

  bool check_young_list_empty(bool check_heap,
                              bool check_sample = true);

  // *** Stuff related to concurrent marking.  It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void doConcurrentMark();

  bool isMarkedPrev(oop obj) const;
  bool isMarkedNext(oop obj) const;

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark and b) it
  // is not marked.

  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj);
  }

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking.

  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj);
  }

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.

  // Note: an object in the permanent generation is never considered
  // dead here, and neither is a NULL object.

  bool is_obj_dead(const oop obj) const {
    const HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_dead(obj, hr);
  }

  bool is_obj_ill(const oop obj) const {
    const HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_ill(obj, hr);
  }

  // The methods below are here for convenience and dispatch the
  // appropriate method depending on the value of the given VerifyOption
  // parameter. The options for that parameter are:
  //
  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information,
  // vo == UseMarkWord    -> use the mark word from the object header

  bool is_obj_dead_cond(const oop obj,
                        const HeapRegion* hr,
                        const VerifyOption vo) const {
    switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
    default:                            ShouldNotReachHere();
    }
    return false; // keep some compilers happy
  }

  bool is_obj_dead_cond(const oop obj,
                        const VerifyOption vo) const {
    switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
    default:                            ShouldNotReachHere();
    }
    return false; // keep some compilers happy
  }

  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
  bool is_marked(oop obj, VerifyOption vo);
  const char* top_at_mark_start_str(VerifyOption vo);

  // The following is just to alert the verification code
  // that a full collection has occurred and that the
  // remembered sets are no longer up to date.
  bool _full_collection;
  void set_full_collection() { _full_collection = true; }
  void clear_full_collection() { _full_collection = false; }
  bool full_collection() { return _full_collection; }

  ConcurrentMark* concurrent_mark() const { return _cm; }
  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }

  // The dirty cards region list is used to record a subset of regions
  // whose cards need clearing. The list is populated during the
  // remembered set scanning and drained during the card table
  // cleanup. Although the methods are reentrant, the population and
  // draining phases must not overlap. For synchronization purposes the
  // last element on the list points to itself.
  HeapRegion* _dirty_cards_region_list;
  void push_dirty_cards_region(HeapRegion* hr);
  HeapRegion* pop_dirty_cards_region();

public:
  void stop_conc_gc_threads();

  size_t pending_card_num();
  size_t cards_scanned();

protected:
  size_t _max_heap_capacity;
};

class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool        _retired;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);

  void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParScanThreadState : public StackObj {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;
  DirtyCardQueue   _dcq;
  CardTableModRefBS* _ct_bs;
  G1RemSet* _g1_rem;

  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
  ageTable            _age_table;

  size_t           _alloc_buffer_waste;
  size_t           _undo_waste;

  OopsInHeapRegionClosure*      _evac_failure_cl;
  G1ParScanHeapEvacClosure*     _evac_cl;
  G1ParScanPartialArrayClosure* _partial_scan_cl;

  int  _hash_seed;
  uint _queue_num;

  size_t _term_attempts;

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. "base" is what we get back from the malloc call.
  size_t* _surviving_young_words_base;
  // This points into the array, as we use the first few entries for padding.
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
  CardTableModRefBS* ctbs()                      { return _ct_bs; }

  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
    if (!from->is_survivor()) {
      _g1_rem->par_write_ref(from, p, tid);
    }
  }

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points into the same region or
    // into the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }

public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);

  ~G1ParScanThreadState() {
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
  }

  RefToScanQueue*   refs()            { return _refs;             }
  ageTable*         age_table()       { return &_age_table;       }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return _alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
  size_t undo_waste() const                      { return _undo_waste; }

#ifdef ASSERT
  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    refs()->push(ref);
  }

  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    if (G1DeferredRSUpdate) {
      deferred_rs_update(from, p, tid);
    } else {
      immediate_rs_update(from, p, tid);
    }
  }

  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = NULL;
    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
      alloc_buf->flush_stats_and_retire(_g1h->stats_for_purpose(purpose),
                                        false /* end_of_gc */,
                                        false /* retain */);

      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.
      // Otherwise.
      alloc_buf->set_word_size(gclab_word_size);
      alloc_buf->set_buf(buf);

      obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
    } else {
      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
    }
    return obj;
  }
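
  // The check in allocate_slow() keeps PLAB waste bounded: a buffer is
  // retired and refilled only for objects smaller than
  // ParallelGCBufferWastePct percent of the PLAB size. For example
  // (illustrative numbers only): with a 4096-word PLAB and
  // ParallelGCBufferWastePct == 10, an object of up to 409 words may
  // trigger a refill, while anything larger is allocated directly via
  // par_allocate_during_gc() so a nearly-full buffer is not thrown away.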

  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
    if (obj != NULL) return obj;
    return allocate_slow(purpose, word_sz);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
    if (alloc_buffer(purpose)->contains(obj)) {
      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }
  OopsInHeapRegionClosure* evac_failure_closure() {
    return _evac_failure_cl;
  }

  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
    _evac_cl = evac_cl;
  }

  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
    _partial_scan_cl = partial_scan_cl;
  }

  int* hash_seed() { return &_hash_seed; }
  uint queue_num() { return _queue_num; }

  size_t term_attempts() const { return _term_attempts; }
  void note_term_attempt()     { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }

  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }

  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void
    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;

  size_t* surviving_young_words() {
    // We add on to hide entry 0 which accumulates surviving words for
    // age -1 regions (i.e. non-young ones)
    return _surviving_young_words;
  }

  void retire_alloc_buffers() {
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      size_t waste = _alloc_buffers[ap]->words_remaining();
      add_to_alloc_buffer_waste(waste);
      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
                                                 true /* end_of_gc */,
                                                 false /* retain */);
    }
  }

  template <class T> void deal_with_reference(T* ref_to_scan) {
    if (has_partial_array_mask(ref_to_scan)) {
      _partial_scan_cl->do_oop_nv(ref_to_scan);
    } else {
      // Note: we can use the "raw" version of "region_containing" because
      // "ref_to_scan" is definitely in the heap, and is not in a
      // humongous region.
      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
      _evac_cl->set_region(r);
      _evac_cl->do_oop_nv(ref_to_scan);
    }
  }

  void deal_with_reference(StarTask ref) {
    assert(verify_task(ref), "sanity");
    if (ref.is_narrow()) {
      deal_with_reference((narrowOop*)ref);
    } else {
      deal_with_reference((oop*)ref);
    }
  }

public:
  void trim_queue();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
