src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author:      johnc
date:        Tue, 21 Aug 2012 14:10:39 -0700
changeset:   3998:7383557659bd
parent:      3997:f99a36499b8c
child:       4015:bb3f6194fedb
permissions: -rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
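
For context, the two corrections described above boil down to (a) reporting the pending dirty cards as a card count rather than a byte total, and (b) rating a non-young region's GC efficiency from a mixed-GC time prediction over the space it would actually reclaim. The following is a minimal sketch of that intent, not the committed diff; it assumes the DirtyCardQueue/DirtyCardQueueSet accessors and the HeapRegion::reclaimable_bytes() / G1CollectorPolicy::predict_region_elapsed_time_ms() interfaces of this HotSpot version, and the efficiency calculation itself lives outside this file (HeapRegion / G1CollectorPolicy):

    // Sketch only: PtrQueue::size() and PtrQueueSet::buffer_size() are byte
    // sizes, so the pending-card count has to be divided by the entry size.
    size_t G1CollectedHeap::pending_card_num() {
      size_t extra_cards = 0;
      for (JavaThread* curr = Threads::first(); curr != NULL; curr = curr->next()) {
        extra_cards += curr->dirty_card_queue().size();
      }
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      size_t buffer_size = dcqs.buffer_size();
      size_t buffer_num  = dcqs.completed_buffers_num();
      return (buffer_size * buffer_num + extra_cards) / oopSize;
    }

    // Sketch only: GC efficiency of a non-young region, predicting its
    // collection time from mixed-GC history and its actual live bytes.
    void HeapRegion::calc_gc_efficiency() {
      G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
      double region_elapsed_time_ms =
        g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
      _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
    }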

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/icBuffer.hpp"
    27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
    28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
    30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    35 #include "gc_implementation/g1/g1EvacFailure.hpp"
    36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
    37 #include "gc_implementation/g1/g1Log.hpp"
    38 #include "gc_implementation/g1/g1MarkSweep.hpp"
    39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    41 #include "gc_implementation/g1/heapRegion.inline.hpp"
    42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    44 #include "gc_implementation/g1/vm_operations_g1.hpp"
    45 #include "gc_implementation/shared/isGCActiveMark.hpp"
    46 #include "memory/gcLocker.inline.hpp"
    47 #include "memory/genOopClosures.inline.hpp"
    48 #include "memory/generationSpec.hpp"
    49 #include "memory/referenceProcessor.hpp"
    50 #include "oops/oop.inline.hpp"
    51 #include "oops/oop.pcgc.inline.hpp"
    52 #include "runtime/aprofiler.hpp"
    53 #include "runtime/vmThread.hpp"
    55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
    57 // turn it on so that the contents of the young list (scan-only /
    58 // to-be-collected) are printed at "strategic" points before / during
    59 // / after the collection --- this is useful for debugging
    60 #define YOUNG_LIST_VERBOSE 0
    61 // CURRENT STATUS
    62 // This file is under construction.  Search for "FIXME".
    64 // INVARIANTS/NOTES
    65 //
    66 // All allocation activity covered by the G1CollectedHeap interface is
    67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
    68 // and allocate_new_tlab, which are the "entry" points to the
    69 // allocation code from the rest of the JVM.  (Note that this does not
    70 // apply to TLAB allocation, which is not part of this interface: it
    71 // is done by clients of this interface.)
    73 // Notes on implementation of parallelism in different tasks.
    74 //
    75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
    76 // The number of GC workers is passed to heap_region_par_iterate_chunked().
    77 // It does use run_task() which sets _n_workers in the task.
    78 // G1ParTask executes g1_process_strong_roots() ->
    80 // SharedHeap::process_strong_roots() which eventually calls
    80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
    81 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
    82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
    83 //
    85 // Local to this file.
    87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    88   SuspendibleThreadSet* _sts;
    89   G1RemSet* _g1rs;
    90   ConcurrentG1Refine* _cg1r;
    91   bool _concurrent;
    92 public:
    93   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    94                               G1RemSet* g1rs,
    95                               ConcurrentG1Refine* cg1r) :
    96     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    97   {}
    98   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    99     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
   100     // This path is executed by the concurrent refine or mutator threads,
   101     // concurrently, and so we do not care if card_ptr contains references
   102     // that point into the collection set.
   103     assert(!oops_into_cset, "should be");
   105     if (_concurrent && _sts->should_yield()) {
   106       // Caller will actually yield.
   107       return false;
   108     }
   109     // Otherwise, we finished successfully; return true.
   110     return true;
   111   }
   112   void set_concurrent(bool b) { _concurrent = b; }
   113 };
   116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
   117   int _calls;
   118   G1CollectedHeap* _g1h;
   119   CardTableModRefBS* _ctbs;
   120   int _histo[256];
   121 public:
   122   ClearLoggedCardTableEntryClosure() :
   123     _calls(0)
   124   {
   125     _g1h = G1CollectedHeap::heap();
   126     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   127     for (int i = 0; i < 256; i++) _histo[i] = 0;
   128   }
   129   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   130     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   131       _calls++;
   132       unsigned char* ujb = (unsigned char*)card_ptr;
   133       int ind = (int)(*ujb);
   134       _histo[ind]++;
   135       *card_ptr = -1;
   136     }
   137     return true;
   138   }
   139   int calls() { return _calls; }
   140   void print_histo() {
   141     gclog_or_tty->print_cr("Card table value histogram:");
   142     for (int i = 0; i < 256; i++) {
   143       if (_histo[i] != 0) {
   144         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
   145       }
   146     }
   147   }
   148 };
   150 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   151   int _calls;
   152   G1CollectedHeap* _g1h;
   153   CardTableModRefBS* _ctbs;
   154 public:
   155   RedirtyLoggedCardTableEntryClosure() :
   156     _calls(0)
   157   {
   158     _g1h = G1CollectedHeap::heap();
   159     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   160   }
   161   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   162     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   163       _calls++;
   164       *card_ptr = 0;
   165     }
   166     return true;
   167   }
   168   int calls() { return _calls; }
   169 };
   171 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   172 public:
   173   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   174     *card_ptr = CardTableModRefBS::dirty_card_val();
   175     return true;
   176   }
   177 };
   179 YoungList::YoungList(G1CollectedHeap* g1h) :
   180     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
   181     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
   182   guarantee(check_list_empty(false), "just making sure...");
   183 }
   185 void YoungList::push_region(HeapRegion *hr) {
   186   assert(!hr->is_young(), "should not already be young");
   187   assert(hr->get_next_young_region() == NULL, "cause it should!");
   189   hr->set_next_young_region(_head);
   190   _head = hr;
   192   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
   193   ++_length;
   194 }
   196 void YoungList::add_survivor_region(HeapRegion* hr) {
   197   assert(hr->is_survivor(), "should be flagged as survivor region");
   198   assert(hr->get_next_young_region() == NULL, "cause it should!");
   200   hr->set_next_young_region(_survivor_head);
   201   if (_survivor_head == NULL) {
   202     _survivor_tail = hr;
   203   }
   204   _survivor_head = hr;
   205   ++_survivor_length;
   206 }
   208 void YoungList::empty_list(HeapRegion* list) {
   209   while (list != NULL) {
   210     HeapRegion* next = list->get_next_young_region();
   211     list->set_next_young_region(NULL);
   212     list->uninstall_surv_rate_group();
   213     list->set_not_young();
   214     list = next;
   215   }
   216 }
   218 void YoungList::empty_list() {
   219   assert(check_list_well_formed(), "young list should be well formed");
   221   empty_list(_head);
   222   _head = NULL;
   223   _length = 0;
   225   empty_list(_survivor_head);
   226   _survivor_head = NULL;
   227   _survivor_tail = NULL;
   228   _survivor_length = 0;
   230   _last_sampled_rs_lengths = 0;
   232   assert(check_list_empty(false), "just making sure...");
   233 }
   235 bool YoungList::check_list_well_formed() {
   236   bool ret = true;
   238   uint length = 0;
   239   HeapRegion* curr = _head;
   240   HeapRegion* last = NULL;
   241   while (curr != NULL) {
   242     if (!curr->is_young()) {
   243       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   244                              "incorrectly tagged (y: %d, surv: %d)",
   245                              curr->bottom(), curr->end(),
   246                              curr->is_young(), curr->is_survivor());
   247       ret = false;
   248     }
   249     ++length;
   250     last = curr;
   251     curr = curr->get_next_young_region();
   252   }
   253   ret = ret && (length == _length);
   255   if (!ret) {
   256     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   257     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
   258                            length, _length);
   259   }
   261   return ret;
   262 }
   264 bool YoungList::check_list_empty(bool check_sample) {
   265   bool ret = true;
   267   if (_length != 0) {
   268     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
   269                   _length);
   270     ret = false;
   271   }
   272   if (check_sample && _last_sampled_rs_lengths != 0) {
   273     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   274     ret = false;
   275   }
   276   if (_head != NULL) {
   277     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   278     ret = false;
   279   }
   280   if (!ret) {
   281     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   282   }
   284   return ret;
   285 }
   287 void
   288 YoungList::rs_length_sampling_init() {
   289   _sampled_rs_lengths = 0;
   290   _curr               = _head;
   291 }
   293 bool
   294 YoungList::rs_length_sampling_more() {
   295   return _curr != NULL;
   296 }
   298 void
   299 YoungList::rs_length_sampling_next() {
   300   assert( _curr != NULL, "invariant" );
   301   size_t rs_length = _curr->rem_set()->occupied();
   303   _sampled_rs_lengths += rs_length;
   305   // The current region may not yet have been added to the
   306   // incremental collection set (it gets added when it is
   307   // retired as the current allocation region).
   308   if (_curr->in_collection_set()) {
   309     // Update the collection set policy information for this region
   310     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   311   }
   313   _curr = _curr->get_next_young_region();
   314   if (_curr == NULL) {
   315     _last_sampled_rs_lengths = _sampled_rs_lengths;
   316     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   317   }
   318 }
   320 void
   321 YoungList::reset_auxilary_lists() {
   322   guarantee( is_empty(), "young list should be empty" );
   323   assert(check_list_well_formed(), "young list should be well formed");
   325   // Add survivor regions to SurvRateGroup.
   326   _g1h->g1_policy()->note_start_adding_survivor_regions();
   327   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   329   int young_index_in_cset = 0;
   330   for (HeapRegion* curr = _survivor_head;
   331        curr != NULL;
   332        curr = curr->get_next_young_region()) {
   333     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
   335     // The region is a non-empty survivor so let's add it to
   336     // the incremental collection set for the next evacuation
   337     // pause.
   338     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   339     young_index_in_cset += 1;
   340   }
   341   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
   342   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   344   _head   = _survivor_head;
   345   _length = _survivor_length;
   346   if (_survivor_head != NULL) {
   347     assert(_survivor_tail != NULL, "cause it shouldn't be");
   348     assert(_survivor_length > 0, "invariant");
   349     _survivor_tail->set_next_young_region(NULL);
   350   }
   352   // Don't clear the survivor list handles until the start of
   353   // the next evacuation pause - we need it in order to re-tag
   354   // the survivor regions from this evacuation pause as 'young'
   355   // at the start of the next.
   357   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   359   assert(check_list_well_formed(), "young list should be well formed");
   360 }
   362 void YoungList::print() {
   363   HeapRegion* lists[] = {_head,   _survivor_head};
   364   const char* names[] = {"YOUNG", "SURVIVOR"};
   366   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   367     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   368     HeapRegion *curr = lists[list];
   369     if (curr == NULL)
   370       gclog_or_tty->print_cr("  empty");
   371     while (curr != NULL) {
   372       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
   373                              HR_FORMAT_PARAMS(curr),
   374                              curr->prev_top_at_mark_start(),
   375                              curr->next_top_at_mark_start(),
   376                              curr->age_in_surv_rate_group_cond());
   377       curr = curr->get_next_young_region();
   378     }
   379   }
   381   gclog_or_tty->print_cr("");
   382 }
   384 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   385 {
   386   // Claim the right to put the region on the dirty cards region list
   387   // by installing a self pointer.
   388   HeapRegion* next = hr->get_next_dirty_cards_region();
   389   if (next == NULL) {
   390     HeapRegion* res = (HeapRegion*)
   391       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   392                           NULL);
   393     if (res == NULL) {
   394       HeapRegion* head;
   395       do {
   396         // Put the region to the dirty cards region list.
   397         head = _dirty_cards_region_list;
   398         next = (HeapRegion*)
   399           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   400         if (next == head) {
   401           assert(hr->get_next_dirty_cards_region() == hr,
   402                  "hr->get_next_dirty_cards_region() != hr");
   403           if (next == NULL) {
   404             // The last region in the list points to itself.
   405             hr->set_next_dirty_cards_region(hr);
   406           } else {
   407             hr->set_next_dirty_cards_region(next);
   408           }
   409         }
   410       } while (next != head);
   411     }
   412   }
   413 }
   415 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   416 {
   417   HeapRegion* head;
   418   HeapRegion* hr;
   419   do {
   420     head = _dirty_cards_region_list;
   421     if (head == NULL) {
   422       return NULL;
   423     }
   424     HeapRegion* new_head = head->get_next_dirty_cards_region();
   425     if (head == new_head) {
   426       // The last region.
   427       new_head = NULL;
   428     }
   429     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   430                                           head);
   431   } while (hr != head);
   432   assert(hr != NULL, "invariant");
   433   hr->set_next_dirty_cards_region(NULL);
   434   return hr;
   435 }
   437 void G1CollectedHeap::stop_conc_gc_threads() {
   438   _cg1r->stop();
   439   _cmThread->stop();
   440 }
   442 #ifdef ASSERT
   443 // A region is added to the collection set as it is retired
   444 // so an address p can point to a region which will be in the
   445 // collection set but has not yet been retired.  This method
   446 // therefore is only accurate during a GC pause after all
   447 // regions have been retired.  It is used for debugging
   448 // to check if an nmethod has references to objects that can
    449 // be moved during a partial collection.  Though it can be
   450 // inaccurate, it is sufficient for G1 because the conservative
   451 // implementation of is_scavengable() for G1 will indicate that
   452 // all nmethods must be scanned during a partial collection.
   453 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
   454   HeapRegion* hr = heap_region_containing(p);
   455   return hr != NULL && hr->in_collection_set();
   456 }
   457 #endif
   459 // Returns true if the reference points to an object that
    460 // can move in an incremental collection.
   461 bool G1CollectedHeap::is_scavengable(const void* p) {
   462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   463   G1CollectorPolicy* g1p = g1h->g1_policy();
   464   HeapRegion* hr = heap_region_containing(p);
   465   if (hr == NULL) {
   466      // perm gen (or null)
   467      return false;
   468   } else {
   469     return !hr->isHumongous();
   470   }
   471 }
   473 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   474   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   475   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   477   // Count the dirty cards at the start.
   478   CountNonCleanMemRegionClosure count1(this);
   479   ct_bs->mod_card_iterate(&count1);
   480   int orig_count = count1.n();
   482   // First clear the logged cards.
   483   ClearLoggedCardTableEntryClosure clear;
   484   dcqs.set_closure(&clear);
   485   dcqs.apply_closure_to_all_completed_buffers();
   486   dcqs.iterate_closure_all_threads(false);
   487   clear.print_histo();
    489   // Now ensure that there are no dirty cards.
   490   CountNonCleanMemRegionClosure count2(this);
   491   ct_bs->mod_card_iterate(&count2);
   492   if (count2.n() != 0) {
   493     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   494                            count2.n(), orig_count);
   495   }
   496   guarantee(count2.n() == 0, "Card table should be clean.");
   498   RedirtyLoggedCardTableEntryClosure redirty;
   499   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   500   dcqs.apply_closure_to_all_completed_buffers();
   501   dcqs.iterate_closure_all_threads(false);
   502   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   503                          clear.calls(), orig_count);
   504   guarantee(redirty.calls() == clear.calls(),
   505             "Or else mechanism is broken.");
   507   CountNonCleanMemRegionClosure count3(this);
   508   ct_bs->mod_card_iterate(&count3);
   509   if (count3.n() != orig_count) {
   510     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   511                            orig_count, count3.n());
   512     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   513   }
   515   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   516 }
   518 // Private class members.
   520 G1CollectedHeap* G1CollectedHeap::_g1h;
   522 // Private methods.
   524 HeapRegion*
   525 G1CollectedHeap::new_region_try_secondary_free_list() {
   526   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   527   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
   528     if (!_secondary_free_list.is_empty()) {
   529       if (G1ConcRegionFreeingVerbose) {
   530         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   531                                "secondary_free_list has %u entries",
   532                                _secondary_free_list.length());
   533       }
   534       // It looks as if there are free regions available on the
   535       // secondary_free_list. Let's move them to the free_list and try
   536       // again to allocate from it.
   537       append_secondary_free_list();
   539       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
   540              "empty we should have moved at least one entry to the free_list");
   541       HeapRegion* res = _free_list.remove_head();
   542       if (G1ConcRegionFreeingVerbose) {
   543         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   544                                "allocated "HR_FORMAT" from secondary_free_list",
   545                                HR_FORMAT_PARAMS(res));
   546       }
   547       return res;
   548     }
    550     // Wait here until we get notified either when (a) there are no
    551     // more free regions coming or (b) some regions have been moved onto
    552     // the secondary_free_list.
   553     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   554   }
   556   if (G1ConcRegionFreeingVerbose) {
   557     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   558                            "could not allocate from secondary_free_list");
   559   }
   560   return NULL;
   561 }
   563 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   564   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
   565          "the only time we use this to allocate a humongous region is "
   566          "when we are allocating a single humongous region");
   568   HeapRegion* res;
   569   if (G1StressConcRegionFreeing) {
   570     if (!_secondary_free_list.is_empty()) {
   571       if (G1ConcRegionFreeingVerbose) {
   572         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   573                                "forced to look at the secondary_free_list");
   574       }
   575       res = new_region_try_secondary_free_list();
   576       if (res != NULL) {
   577         return res;
   578       }
   579     }
   580   }
   581   res = _free_list.remove_head_or_null();
   582   if (res == NULL) {
   583     if (G1ConcRegionFreeingVerbose) {
   584       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   585                              "res == NULL, trying the secondary_free_list");
   586     }
   587     res = new_region_try_secondary_free_list();
   588   }
   589   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
   590     // Currently, only attempts to allocate GC alloc regions set
   591     // do_expand to true. So, we should only reach here during a
   592     // safepoint. If this assumption changes we might have to
   593     // reconsider the use of _expand_heap_after_alloc_failure.
   594     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   596     ergo_verbose1(ErgoHeapSizing,
   597                   "attempt heap expansion",
   598                   ergo_format_reason("region allocation request failed")
   599                   ergo_format_byte("allocation request"),
   600                   word_size * HeapWordSize);
   601     if (expand(word_size * HeapWordSize)) {
   602       // Given that expand() succeeded in expanding the heap, and we
   603       // always expand the heap by an amount aligned to the heap
   604       // region size, the free list should in theory not be empty. So
   605       // it would probably be OK to use remove_head(). But the extra
   606       // check for NULL is unlikely to be a performance issue here (we
   607       // just expanded the heap!) so let's just be conservative and
   608       // use remove_head_or_null().
   609       res = _free_list.remove_head_or_null();
   610     } else {
   611       _expand_heap_after_alloc_failure = false;
   612     }
   613   }
   614   return res;
   615 }
   617 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
   618                                                         size_t word_size) {
   619   assert(isHumongous(word_size), "word_size should be humongous");
   620   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   622   uint first = G1_NULL_HRS_INDEX;
   623   if (num_regions == 1) {
   624     // Only one region to allocate, no need to go through the slower
    625     // path. The caller will attempt the expansion if this fails, so
   626     // let's not try to expand here too.
   627     HeapRegion* hr = new_region(word_size, false /* do_expand */);
   628     if (hr != NULL) {
   629       first = hr->hrs_index();
   630     } else {
   631       first = G1_NULL_HRS_INDEX;
   632     }
   633   } else {
   634     // We can't allocate humongous regions while cleanupComplete() is
   635     // running, since some of the regions we find to be empty might not
   636     // yet be added to the free list and it is not straightforward to
   637     // know which list they are on so that we can remove them. Note
   638     // that we only need to do this if we need to allocate more than
   639     // one region to satisfy the current humongous allocation
   640     // request. If we are only allocating one region we use the common
   641     // region allocation code (see above).
   642     wait_while_free_regions_coming();
   643     append_secondary_free_list_if_not_empty_with_lock();
   645     if (free_regions() >= num_regions) {
   646       first = _hrs.find_contiguous(num_regions);
   647       if (first != G1_NULL_HRS_INDEX) {
   648         for (uint i = first; i < first + num_regions; ++i) {
   649           HeapRegion* hr = region_at(i);
   650           assert(hr->is_empty(), "sanity");
   651           assert(is_on_master_free_list(hr), "sanity");
   652           hr->set_pending_removal(true);
   653         }
   654         _free_list.remove_all_pending(num_regions);
   655       }
   656     }
   657   }
   658   return first;
   659 }
   661 HeapWord*
   662 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   663                                                            uint num_regions,
   664                                                            size_t word_size) {
   665   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   666   assert(isHumongous(word_size), "word_size should be humongous");
   667   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   669   // Index of last region in the series + 1.
   670   uint last = first + num_regions;
   672   // We need to initialize the region(s) we just discovered. This is
   673   // a bit tricky given that it can happen concurrently with
   674   // refinement threads refining cards on these regions and
   675   // potentially wanting to refine the BOT as they are scanning
   676   // those cards (this can happen shortly after a cleanup; see CR
   677   // 6991377). So we have to set up the region(s) carefully and in
   678   // a specific order.
   680   // The word size sum of all the regions we will allocate.
   681   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
   682   assert(word_size <= word_size_sum, "sanity");
   684   // This will be the "starts humongous" region.
   685   HeapRegion* first_hr = region_at(first);
   686   // The header of the new object will be placed at the bottom of
   687   // the first region.
   688   HeapWord* new_obj = first_hr->bottom();
   689   // This will be the new end of the first region in the series that
    690   // should also match the end of the last region in the series.
   691   HeapWord* new_end = new_obj + word_size_sum;
   692   // This will be the new top of the first region that will reflect
   693   // this allocation.
   694   HeapWord* new_top = new_obj + word_size;
   696   // First, we need to zero the header of the space that we will be
   697   // allocating. When we update top further down, some refinement
   698   // threads might try to scan the region. By zeroing the header we
   699   // ensure that any thread that will try to scan the region will
   700   // come across the zero klass word and bail out.
   701   //
   702   // NOTE: It would not have been correct to have used
   703   // CollectedHeap::fill_with_object() and make the space look like
   704   // an int array. The thread that is doing the allocation will
   705   // later update the object header to a potentially different array
   706   // type and, for a very short period of time, the klass and length
   707   // fields will be inconsistent. This could cause a refinement
   708   // thread to calculate the object size incorrectly.
   709   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   711   // We will set up the first region as "starts humongous". This
   712   // will also update the BOT covering all the regions to reflect
   713   // that there is a single object that starts at the bottom of the
   714   // first region.
   715   first_hr->set_startsHumongous(new_top, new_end);
   717   // Then, if there are any, we will set up the "continues
   718   // humongous" regions.
   719   HeapRegion* hr = NULL;
   720   for (uint i = first + 1; i < last; ++i) {
   721     hr = region_at(i);
   722     hr->set_continuesHumongous(first_hr);
   723   }
   724   // If we have "continues humongous" regions (hr != NULL), then the
   725   // end of the last one should match new_end.
   726   assert(hr == NULL || hr->end() == new_end, "sanity");
   728   // Up to this point no concurrent thread would have been able to
   729   // do any scanning on any region in this series. All the top
   730   // fields still point to bottom, so the intersection between
   731   // [bottom,top] and [card_start,card_end] will be empty. Before we
   732   // update the top fields, we'll do a storestore to make sure that
   733   // no thread sees the update to top before the zeroing of the
   734   // object header and the BOT initialization.
   735   OrderAccess::storestore();
   737   // Now that the BOT and the object header have been initialized,
   738   // we can update top of the "starts humongous" region.
   739   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   740          "new_top should be in this region");
   741   first_hr->set_top(new_top);
   742   if (_hr_printer.is_active()) {
   743     HeapWord* bottom = first_hr->bottom();
   744     HeapWord* end = first_hr->orig_end();
   745     if ((first + 1) == last) {
   746       // the series has a single humongous region
   747       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
   748     } else {
    749       // the series has more than one humongous region
   750       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
   751     }
   752   }
   754   // Now, we will update the top fields of the "continues humongous"
   755   // regions. The reason we need to do this is that, otherwise,
   756   // these regions would look empty and this will confuse parts of
   757   // G1. For example, the code that looks for a consecutive number
   758   // of empty regions will consider them empty and try to
   759   // re-allocate them. We can extend is_empty() to also include
   760   // !continuesHumongous(), but it is easier to just update the top
   761   // fields here. The way we set top for all regions (i.e., top ==
   762   // end for all regions but the last one, top == new_top for the
   763   // last one) is actually used when we will free up the humongous
   764   // region in free_humongous_region().
   765   hr = NULL;
   766   for (uint i = first + 1; i < last; ++i) {
   767     hr = region_at(i);
   768     if ((i + 1) == last) {
   769       // last continues humongous region
   770       assert(hr->bottom() < new_top && new_top <= hr->end(),
   771              "new_top should fall on this region");
   772       hr->set_top(new_top);
   773       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
   774     } else {
   775       // not last one
   776       assert(new_top > hr->end(), "new_top should be above this region");
   777       hr->set_top(hr->end());
   778       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
   779     }
   780   }
   781   // If we have continues humongous regions (hr != NULL), then the
   782   // end of the last one should match new_end and its top should
   783   // match new_top.
   784   assert(hr == NULL ||
   785          (hr->end() == new_end && hr->top() == new_top), "sanity");
   787   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
   788   _summary_bytes_used += first_hr->used();
   789   _humongous_set.add(first_hr);
   791   return new_obj;
   792 }
   794 // If could fit into free regions w/o expansion, try.
   795 // Otherwise, if can expand, do so.
   796 // Otherwise, if using ex regions might help, try with ex given back.
   797 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   798   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   800   verify_region_sets_optional();
   802   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
   803   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
   804   uint x_num = expansion_regions();
   805   uint fs = _hrs.free_suffix();
   806   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
   807   if (first == G1_NULL_HRS_INDEX) {
   808     // The only thing we can do now is attempt expansion.
   809     if (fs + x_num >= num_regions) {
   810       // If the number of regions we're trying to allocate for this
   811       // object is at most the number of regions in the free suffix,
   812       // then the call to humongous_obj_allocate_find_first() above
   813       // should have succeeded and we wouldn't be here.
   814       //
   815       // We should only be trying to expand when the free suffix is
   816       // not sufficient for the object _and_ we have some expansion
   817       // room available.
   818       assert(num_regions > fs, "earlier allocation should have succeeded");
   820       ergo_verbose1(ErgoHeapSizing,
   821                     "attempt heap expansion",
   822                     ergo_format_reason("humongous allocation request failed")
   823                     ergo_format_byte("allocation request"),
   824                     word_size * HeapWordSize);
   825       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
   826         // Even though the heap was expanded, it might not have
   827         // reached the desired size. So, we cannot assume that the
   828         // allocation will succeed.
   829         first = humongous_obj_allocate_find_first(num_regions, word_size);
   830       }
   831     }
   832   }
   834   HeapWord* result = NULL;
   835   if (first != G1_NULL_HRS_INDEX) {
   836     result =
   837       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
   838     assert(result != NULL, "it should always return a valid result");
   840     // A successful humongous object allocation changes the used space
   841     // information of the old generation so we need to recalculate the
   842     // sizes and update the jstat counters here.
   843     g1mm()->update_sizes();
   844   }
   846   verify_region_sets_optional();
   848   return result;
   849 }
   851 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   852   assert_heap_not_locked_and_not_at_safepoint();
   853   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
   855   unsigned int dummy_gc_count_before;
   856   return attempt_allocation(word_size, &dummy_gc_count_before);
   857 }
   859 HeapWord*
   860 G1CollectedHeap::mem_allocate(size_t word_size,
   861                               bool*  gc_overhead_limit_was_exceeded) {
   862   assert_heap_not_locked_and_not_at_safepoint();
    864   // Loop until the allocation is satisfied, or unsatisfied after GC.
   865   for (int try_count = 1; /* we'll return */; try_count += 1) {
   866     unsigned int gc_count_before;
   868     HeapWord* result = NULL;
   869     if (!isHumongous(word_size)) {
   870       result = attempt_allocation(word_size, &gc_count_before);
   871     } else {
   872       result = attempt_allocation_humongous(word_size, &gc_count_before);
   873     }
   874     if (result != NULL) {
   875       return result;
   876     }
   878     // Create the garbage collection operation...
   879     VM_G1CollectForAllocation op(gc_count_before, word_size);
   880     // ...and get the VM thread to execute it.
   881     VMThread::execute(&op);
   883     if (op.prologue_succeeded() && op.pause_succeeded()) {
   884       // If the operation was successful we'll return the result even
   885       // if it is NULL. If the allocation attempt failed immediately
   886       // after a Full GC, it's unlikely we'll be able to allocate now.
   887       HeapWord* result = op.result();
   888       if (result != NULL && !isHumongous(word_size)) {
   889         // Allocations that take place on VM operations do not do any
   890         // card dirtying and we have to do it here. We only have to do
   891         // this for non-humongous allocations, though.
   892         dirty_young_block(result, word_size);
   893       }
   894       return result;
   895     } else {
   896       assert(op.result() == NULL,
   897              "the result should be NULL if the VM op did not succeed");
   898     }
   900     // Give a warning if we seem to be looping forever.
   901     if ((QueuedAllocationWarningCount > 0) &&
   902         (try_count % QueuedAllocationWarningCount == 0)) {
   903       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
   904     }
   905   }
   907   ShouldNotReachHere();
   908   return NULL;
   909 }
   911 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   912                                            unsigned int *gc_count_before_ret) {
   913   // Make sure you read the note in attempt_allocation_humongous().
   915   assert_heap_not_locked_and_not_at_safepoint();
   916   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
   917          "be called for humongous allocation requests");
   919   // We should only get here after the first-level allocation attempt
   920   // (attempt_allocation()) failed to allocate.
   922   // We will loop until a) we manage to successfully perform the
   923   // allocation or b) we successfully schedule a collection which
   924   // fails to perform the allocation. b) is the only case when we'll
   925   // return NULL.
   926   HeapWord* result = NULL;
   927   for (int try_count = 1; /* we'll return */; try_count += 1) {
   928     bool should_try_gc;
   929     unsigned int gc_count_before;
   931     {
   932       MutexLockerEx x(Heap_lock);
   934       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
   935                                                       false /* bot_updates */);
   936       if (result != NULL) {
   937         return result;
   938       }
   940       // If we reach here, attempt_allocation_locked() above failed to
   941       // allocate a new region. So the mutator alloc region should be NULL.
   942       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
   944       if (GC_locker::is_active_and_needs_gc()) {
   945         if (g1_policy()->can_expand_young_list()) {
   946           // No need for an ergo verbose message here,
   947           // can_expand_young_list() does this when it returns true.
   948           result = _mutator_alloc_region.attempt_allocation_force(word_size,
   949                                                       false /* bot_updates */);
   950           if (result != NULL) {
   951             return result;
   952           }
   953         }
   954         should_try_gc = false;
   955       } else {
   956         // The GCLocker may not be active but the GCLocker initiated
   957         // GC may not yet have been performed (GCLocker::needs_gc()
   958         // returns true). In this case we do not try this GC and
   959         // wait until the GCLocker initiated GC is performed, and
   960         // then retry the allocation.
   961         if (GC_locker::needs_gc()) {
   962           should_try_gc = false;
   963         } else {
   964           // Read the GC count while still holding the Heap_lock.
   965           gc_count_before = total_collections();
   966           should_try_gc = true;
   967         }
   968       }
   969     }
   971     if (should_try_gc) {
   972       bool succeeded;
   973       result = do_collection_pause(word_size, gc_count_before, &succeeded);
   974       if (result != NULL) {
   975         assert(succeeded, "only way to get back a non-NULL result");
   976         return result;
   977       }
   979       if (succeeded) {
   980         // If we get here we successfully scheduled a collection which
   981         // failed to allocate. No point in trying to allocate
   982         // further. We'll just return NULL.
   983         MutexLockerEx x(Heap_lock);
   984         *gc_count_before_ret = total_collections();
   985         return NULL;
   986       }
   987     } else {
   988       // The GCLocker is either active or the GCLocker initiated
   989       // GC has not yet been performed. Stall until it is and
   990       // then retry the allocation.
   991       GC_locker::stall_until_clear();
   992     }
    994     // We can reach here if we were unsuccessful in scheduling a
    995     // collection (because another thread beat us to it) or if we were
    996     // stalled due to the GC locker. In either case we should retry the
   997     // allocation attempt in case another thread successfully
   998     // performed a collection and reclaimed enough space. We do the
   999     // first attempt (without holding the Heap_lock) here and the
  1000     // follow-on attempt will be at the start of the next loop
  1001     // iteration (after taking the Heap_lock).
  1002     result = _mutator_alloc_region.attempt_allocation(word_size,
  1003                                                       false /* bot_updates */);
  1004     if (result != NULL) {
  1005       return result;
   1006     }
   1008     // Give a warning if we seem to be looping forever.
   1009     if ((QueuedAllocationWarningCount > 0) &&
   1010         (try_count % QueuedAllocationWarningCount == 0)) {
   1011       warning("G1CollectedHeap::attempt_allocation_slow() "
   1012               "retries %d times", try_count);
   1013     }
   1014   }
   1016   ShouldNotReachHere();
   1017   return NULL;
   1018 }
  1020 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
  1021                                           unsigned int * gc_count_before_ret) {
  1022   // The structure of this method has a lot of similarities to
  1023   // attempt_allocation_slow(). The reason these two were not merged
  1024   // into a single one is that such a method would require several "if
  1025   // allocation is not humongous do this, otherwise do that"
  1026   // conditional paths which would obscure its flow. In fact, an early
  1027   // version of this code did use a unified method which was harder to
  1028   // follow and, as a result, it had subtle bugs that were hard to
  1029   // track down. So keeping these two methods separate allows each to
  1030   // be more readable. It will be good to keep these two in sync as
  1031   // much as possible.
  1033   assert_heap_not_locked_and_not_at_safepoint();
  1034   assert(isHumongous(word_size), "attempt_allocation_humongous() "
  1035          "should only be called for humongous allocations");
  1037   // Humongous objects can exhaust the heap quickly, so we should check if we
  1038   // need to start a marking cycle at each humongous object allocation. We do
  1039   // the check before we do the actual allocation. The reason for doing it
  1040   // before the allocation is that we avoid having to keep track of the newly
  1041   // allocated memory while we do a GC.
  1042   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
  1043                                            word_size)) {
  1044     collect(GCCause::_g1_humongous_allocation);
   1045   }
   1047   // We will loop until a) we manage to successfully perform the
  1048   // allocation or b) we successfully schedule a collection which
  1049   // fails to perform the allocation. b) is the only case when we'll
  1050   // return NULL.
  1051   HeapWord* result = NULL;
  1052   for (int try_count = 1; /* we'll return */; try_count += 1) {
  1053     bool should_try_gc;
  1054     unsigned int gc_count_before;
   1056     {
   1057       MutexLockerEx x(Heap_lock);
  1059       // Given that humongous objects are not allocated in young
  1060       // regions, we'll first try to do the allocation without doing a
  1061       // collection hoping that there's enough space in the heap.
  1062       result = humongous_obj_allocate(word_size);
  1063       if (result != NULL) {
  1064         return result;
   1065       }
   1067       if (GC_locker::is_active_and_needs_gc()) {
  1068         should_try_gc = false;
  1069       } else {
  1070          // The GCLocker may not be active but the GCLocker initiated
  1071         // GC may not yet have been performed (GCLocker::needs_gc()
  1072         // returns true). In this case we do not try this GC and
  1073         // wait until the GCLocker initiated GC is performed, and
  1074         // then retry the allocation.
  1075         if (GC_locker::needs_gc()) {
  1076           should_try_gc = false;
  1077         } else {
  1078           // Read the GC count while still holding the Heap_lock.
  1079           gc_count_before = total_collections();
  1080           should_try_gc = true;
   1081         }
   1082       }
   1083     }
   1085     if (should_try_gc) {
  1086       // If we failed to allocate the humongous object, we should try to
  1087       // do a collection pause (if we're allowed) in case it reclaims
  1088       // enough space for the allocation to succeed after the pause.
  1090       bool succeeded;
  1091       result = do_collection_pause(word_size, gc_count_before, &succeeded);
  1092       if (result != NULL) {
  1093         assert(succeeded, "only way to get back a non-NULL result");
  1094         return result;
   1095       }
   1097       if (succeeded) {
  1098         // If we get here we successfully scheduled a collection which
  1099         // failed to allocate. No point in trying to allocate
  1100         // further. We'll just return NULL.
  1101         MutexLockerEx x(Heap_lock);
  1102         *gc_count_before_ret = total_collections();
  1103         return NULL;
   1104       }
   1105     } else {
  1106       // The GCLocker is either active or the GCLocker initiated
  1107       // GC has not yet been performed. Stall until it is and
  1108       // then retry the allocation.
  1109       GC_locker::stall_until_clear();
   1110     }
   1112     // We can reach here if we were unsuccessful in scheduling a
   1113     // collection (because another thread beat us to it) or if we were
   1114     // stalled due to the GC locker. In either case we should retry the
  1115     // allocation attempt in case another thread successfully
  1116     // performed a collection and reclaimed enough space.  Give a
  1117     // warning if we seem to be looping forever.
  1119     if ((QueuedAllocationWarningCount > 0) &&
  1120         (try_count % QueuedAllocationWarningCount == 0)) {
  1121       warning("G1CollectedHeap::attempt_allocation_humongous() "
  1122               "retries %d times", try_count);
   1123     }
   1124   }
   1126   ShouldNotReachHere();
   1127   return NULL;
   1128 }
  1130 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
  1131                                        bool expect_null_mutator_alloc_region) {
  1132   assert_at_safepoint(true /* should_be_vm_thread */);
  1133   assert(_mutator_alloc_region.get() == NULL ||
  1134                                              !expect_null_mutator_alloc_region,
  1135          "the current alloc region was unexpectedly found to be non-NULL");
  1137   if (!isHumongous(word_size)) {
  1138     return _mutator_alloc_region.attempt_allocation_locked(word_size,
  1139                                                       false /* bot_updates */);
  1140   } else {
  1141     HeapWord* result = humongous_obj_allocate(word_size);
  1142     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
  1143       g1_policy()->set_initiate_conc_mark_if_possible();
   1144     }
   1145     return result;
   1146   }
   1148   ShouldNotReachHere();
   1149 }
  1151 class PostMCRemSetClearClosure: public HeapRegionClosure {
  1152   G1CollectedHeap* _g1h;
  1153   ModRefBarrierSet* _mr_bs;
  1154 public:
  1155   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
  1156     _g1h(g1h), _mr_bs(mr_bs) { }
  1157   bool doHeapRegion(HeapRegion* r) {
  1158     if (r->continuesHumongous()) {
  1159       return false;
   1160     }
   1161     _g1h->reset_gc_time_stamps(r);
  1162     HeapRegionRemSet* hrrs = r->rem_set();
  1163     if (hrrs != NULL) hrrs->clear();
  1164     // You might think here that we could clear just the cards
  1165     // corresponding to the used region.  But no: if we leave a dirty card
  1166     // in a region we might allocate into, then it would prevent that card
  1167     // from being enqueued, and cause it to be missed.
  1168     // Re: the performance cost: we shouldn't be doing full GC anyway!
  1169     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
  1170     return false;
   1171   }
   1172 };
  1174 void G1CollectedHeap::clear_rsets_post_compaction() {
  1175   PostMCRemSetClearClosure rs_clear(this, mr_bs());
  1176   heap_region_iterate(&rs_clear);
   1177 }
   1179 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  1180   G1CollectedHeap*   _g1h;
  1181   UpdateRSOopClosure _cl;
  1182   int                _worker_i;
  1183 public:
  1184   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
  1185     _cl(g1->g1_rem_set(), worker_i),
  1186     _worker_i(worker_i),
  1187     _g1h(g1)
  1188   { }
  1190   bool doHeapRegion(HeapRegion* r) {
  1191     if (!r->continuesHumongous()) {
  1192       _cl.set_from(r);
  1193       r->oop_iterate(&_cl);
   1194     }
   1195     return false;
   1196   }
   1197 };
  1199 class ParRebuildRSTask: public AbstractGangTask {
  1200   G1CollectedHeap* _g1;
  1201 public:
  1202   ParRebuildRSTask(G1CollectedHeap* g1)
  1203     : AbstractGangTask("ParRebuildRSTask"),
  1204       _g1(g1)
  1205   { }
  1207   void work(uint worker_id) {
  1208     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
  1209     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
  1210                                           _g1->workers()->active_workers(),
  1211                                          HeapRegion::RebuildRSClaimValue);
   1212   }
   1213 };
  1215 class PostCompactionPrinterClosure: public HeapRegionClosure {
  1216 private:
  1217   G1HRPrinter* _hr_printer;
  1218 public:
  1219   bool doHeapRegion(HeapRegion* hr) {
  1220     assert(!hr->is_young(), "not expecting to find young regions");
  1221     // We only generate output for non-empty regions.
  1222     if (!hr->is_empty()) {
  1223       if (!hr->isHumongous()) {
  1224         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
  1225       } else if (hr->startsHumongous()) {
  1226         if (hr->region_num() == 1) {
  1227           // single humongous region
  1228           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
  1229         } else {
  1230           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
   1231         }
   1232       } else {
  1233         assert(hr->continuesHumongous(), "only way to get here");
  1234         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
   1235       }
   1236     }
   1237     return false;
   1238   }
  1240   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
  1241     : _hr_printer(hr_printer) { }
  1242 };
  1244 void G1CollectedHeap::print_hrs_post_compaction() {
  1245   PostCompactionPrinterClosure cl(hr_printer());
  1246   heap_region_iterate(&cl);
   1247 }
   1249 bool G1CollectedHeap::do_collection(bool explicit_gc,
  1250                                     bool clear_all_soft_refs,
  1251                                     size_t word_size) {
  1252   assert_at_safepoint(true /* should_be_vm_thread */);
  1254   if (GC_locker::check_active_before_gc()) {
  1255     return false;
   1256   }
   1258   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1259   ResourceMark rm;
  1261   print_heap_before_gc();
  1263   HRSPhaseSetter x(HRSPhaseFullGC);
  1264   verify_region_sets_optional();
  1266   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1267                            collector_policy()->should_clear_all_soft_refs();
  1269   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
   1271   {
   1272     IsGCActiveMark x;
  1274     // Timing
  1275     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
  1276     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  1277     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  1279     TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
  1280     TraceCollectorStats tcs(g1mm()->full_collection_counters());
  1281     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  1283     double start = os::elapsedTime();
  1284     g1_policy()->record_full_collection_start();
  1286     // Note: When we have a more flexible GC logging framework that
  1287     // allows us to add optional attributes to a GC log record we
  1288     // could consider timing and reporting how long we wait in the
  1289     // following two methods.
  1290     wait_while_free_regions_coming();
  1291     // If we start the compaction before the CM threads finish
  1292     // scanning the root regions we might trip them over as we'll
  1293     // be moving objects / updating references. So let's wait until
  1294     // they are done. By telling them to abort, they should complete
  1295     // early.
  1296     _cm->root_regions()->abort();
  1297     _cm->root_regions()->wait_until_scan_finished();
  1298     append_secondary_free_list_if_not_empty_with_lock();
  1300     gc_prologue(true);
  1301     increment_total_collections(true /* full gc */);
  1302     increment_old_marking_cycles_started();
  1304     size_t g1h_prev_used = used();
  1305     assert(used() == recalculate_used(), "Should be equal");
  1307     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  1308       HandleMark hm;  // Discard invalid handles created during verification
  1309       gclog_or_tty->print(" VerifyBeforeGC:");
  1310       prepare_for_verify();
  1311       Universe::verify(/* silent      */ false,
  1312                        /* option      */ VerifyOption_G1UsePrevMarking);
  1315     pre_full_gc_dump();
  1317     COMPILER2_PRESENT(DerivedPointerTable::clear());
  1319     // Disable discovery and empty the discovered lists
  1320     // for the CM ref processor.
  1321     ref_processor_cm()->disable_discovery();
  1322     ref_processor_cm()->abandon_partial_discovery();
  1323     ref_processor_cm()->verify_no_references_recorded();
  1325     // Abandon current iterations of concurrent marking and concurrent
  1326     // refinement, if any are in progress. We have to do this before
  1327     // wait_until_scan_finished() below.
  1328     concurrent_mark()->abort();
  1330     // Make sure we'll choose a new allocation region afterwards.
  1331     release_mutator_alloc_region();
  1332     abandon_gc_alloc_regions();
  1333     g1_rem_set()->cleanupHRRS();
  1335     // We should call this after we retire any currently active alloc
  1336     // regions so that all the ALLOC / RETIRE events are generated
  1337     // before the start GC event.
  1338     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
  1340     // We may have added regions to the current incremental collection
  1341     // set between the last GC or pause and now. We need to clear the
  1342     // incremental collection set and then start rebuilding it afresh
  1343     // after this full GC.
  1344     abandon_collection_set(g1_policy()->inc_cset_head());
  1345     g1_policy()->clear_incremental_cset();
  1346     g1_policy()->stop_incremental_cset_building();
  1348     tear_down_region_sets(false /* free_list_only */);
  1349     g1_policy()->set_gcs_are_young(true);
  1351     // See the comments in g1CollectedHeap.hpp and
  1352     // G1CollectedHeap::ref_processing_init() about
  1353     // how reference processing currently works in G1.
  1355     // Temporarily make discovery by the STW ref processor single threaded (non-MT).
  1356     ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
  1358     // Temporarily clear the STW ref processor's _is_alive_non_header field.
  1359     ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
  1361     ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  1362     ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
  1364     // Do collection work
  1366       HandleMark hm;  // Discard invalid handles created during gc
  1367       G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
  1370     assert(free_regions() == 0, "we should not have added any free regions");
  1371     rebuild_region_sets(false /* free_list_only */);
  1373     // Enqueue any discovered reference objects that have
  1374     // not been removed from the discovered lists.
  1375     ref_processor_stw()->enqueue_discovered_references();
  1377     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1379     MemoryService::track_memory_usage();
  1381     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  1382       HandleMark hm;  // Discard invalid handles created during verification
  1383       gclog_or_tty->print(" VerifyAfterGC:");
  1384       prepare_for_verify();
  1385       Universe::verify(/* silent      */ false,
  1386                        /* option      */ VerifyOption_G1UsePrevMarking);
  1390     assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  1391     ref_processor_stw()->verify_no_references_recorded();
  1393     // Note: since we've just done a full GC, concurrent
  1394     // marking is no longer active. Therefore we need not
  1395     // re-enable reference discovery for the CM ref processor.
  1396     // That will be done at the start of the next marking cycle.
  1397     assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  1398     ref_processor_cm()->verify_no_references_recorded();
  1400     reset_gc_time_stamp();
  1401     // Since everything potentially moved, we will clear all remembered
  1402     // sets, and clear all cards.  Later we will rebuild remembered
  1403     // sets. We will also reset the GC time stamps of the regions.
  1404     clear_rsets_post_compaction();
  1405     check_gc_time_stamps();
  1407     // Resize the heap if necessary.
  1408     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1410     if (_hr_printer.is_active()) {
  1411       // We should do this after we potentially resize the heap so
  1412       // that all the COMMIT / UNCOMMIT events are generated before
  1413       // the end GC event.
  1415       print_hrs_post_compaction();
  1416       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  1419     if (_cg1r->use_cache()) {
  1420       _cg1r->clear_and_record_card_counts();
  1421       _cg1r->clear_hot_cache();
  1424     // Rebuild remembered sets of all regions.
  1425     if (G1CollectedHeap::use_parallel_gc_threads()) {
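             // calc_active_workers() heuristically decides how many of the GC
             // worker threads to use for this parallel phase (taking the number
             // of non-daemon Java threads into account); when dynamic GC thread
             // counts are disabled it simply returns the total worker count.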
  1426       uint n_workers =
  1427         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  1428                                        workers()->active_workers(),
  1429                                        Threads::number_of_non_daemon_threads());
  1430       assert(UseDynamicNumberOfGCThreads ||
  1431              n_workers == workers()->total_workers(),
  1432              "If not dynamic should be using all the workers");
  1433       workers()->set_active_workers(n_workers);
  1434       // Set parallel threads in the heap (_n_par_threads) only
  1435       // before a parallel phase and always reset it to 0 after
  1436       // the phase so that the number of parallel threads does
  1437       // not get carried forward to a serial phase where there
  1438       // may be code that is "possibly_parallel".
  1439       set_par_threads(n_workers);
  1441       ParRebuildRSTask rebuild_rs_task(this);
  1442       assert(check_heap_region_claim_values(
  1443              HeapRegion::InitialClaimValue), "sanity check");
  1444       assert(UseDynamicNumberOfGCThreads ||
  1445              workers()->active_workers() == workers()->total_workers(),
  1446         "Unless dynamic should use total workers");
  1447       // Use the most recent number of active workers
  1448       assert(workers()->active_workers() > 0,
  1449         "Active workers not properly set");
  1450       set_par_threads(workers()->active_workers());
  1451       workers()->run_task(&rebuild_rs_task);
  1452       set_par_threads(0);
  1453       assert(check_heap_region_claim_values(
  1454              HeapRegion::RebuildRSClaimValue), "sanity check");
  1455       reset_heap_region_claim_values();
  1456     } else {
  1457       RebuildRSOutOfRegionClosure rebuild_rs(this);
  1458       heap_region_iterate(&rebuild_rs);
  1461     if (G1Log::fine()) {
  1462       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  1465     if (true) { // FIXME
  1466       // Ask the permanent generation to adjust size for full collections
  1467       perm()->compute_new_size();
  1470     // Start a new incremental collection set for the next pause
  1471     assert(g1_policy()->collection_set() == NULL, "must be");
  1472     g1_policy()->start_incremental_cset_building();
  1474     // Clear the _cset_fast_test bitmap in anticipation of adding
  1475     // regions to the incremental collection set for the next
  1476     // evacuation pause.
  1477     clear_cset_fast_test();
  1479     init_mutator_alloc_region();
  1481     double end = os::elapsedTime();
  1482     g1_policy()->record_full_collection_end();
  1484 #ifdef TRACESPINNING
  1485     ParallelTaskTerminator::print_termination_counts();
  1486 #endif
  1488     gc_epilogue(true);
  1490     // Discard all rset updates
  1491     JavaThread::dirty_card_queue_set().abandon_logs();
  1492     assert(!G1DeferredRSUpdate
  1493            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1495     _young_list->reset_sampled_info();
  1496     // At this point there should be no regions in the
  1497     // entire heap tagged as young.
  1498     assert( check_young_list_empty(true /* check_heap */),
  1499       "young list should be empty at this point");
  1501     // Update the number of full collections that have been completed.
  1502     increment_old_marking_cycles_completed(false /* concurrent */);
  1504     _hrs.verify_optional();
  1505     verify_region_sets_optional();
  1507     print_heap_after_gc();
  1509     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  1510     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  1511     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  1512     // before any GC notifications are raised.
  1513     g1mm()->update_sizes();
  1516   post_full_gc_dump();
  1518   return true;
  1521 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1522   // do_collection() will return whether it succeeded in performing
  1523   // the GC. Currently, there is no facility on the
  1524   // do_full_collection() API to notify the caller that the collection
  1525   // did not succeed (e.g., because it was locked out by the GC
  1526   // locker). So, right now, we'll ignore the return value.
  1527   bool dummy = do_collection(true,                /* explicit_gc */
  1528                              clear_all_soft_refs,
  1529                              0                    /* word_size */);
  1532 // This code is mostly copied from TenuredGeneration.
  1533 void
  1534 G1CollectedHeap::
  1535 resize_if_necessary_after_full_collection(size_t word_size) {
  1536   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1538   // Include the current allocation, if any, and bytes that will be
  1539   // pre-allocated to support collections, as "used".
  1540   const size_t used_after_gc = used();
  1541   const size_t capacity_after_gc = capacity();
  1542   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1544   // This is enforced in arguments.cpp.
  1545   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1546          "otherwise the code below doesn't make sense");
  1548   // We don't have floating point command-line arguments
  1549   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1550   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1551   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1552   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1554   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1555   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1557   // We have to be careful here as these two calculations can overflow
  1558   // 32-bit size_t's.
  1559   double used_after_gc_d = (double) used_after_gc;
  1560   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1561   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
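         // Illustrative example (assumed values, not taken from this code): with
         // used_after_gc = 600M, MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
         // minimum_desired_capacity = 600M / 0.60 = 1000M and
         // maximum_desired_capacity = 600M / 0.30 = 2000M.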
  1563   // Let's make sure that they are both under the max heap size, which
  1564   // by default will make them fit into a size_t.
  1565   double desired_capacity_upper_bound = (double) max_heap_size;
  1566   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1567                                     desired_capacity_upper_bound);
  1568   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1569                                     desired_capacity_upper_bound);
  1571   // We can now safely turn them into size_t's.
  1572   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1573   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
  1575   // This assert only makes sense here, before we adjust them
  1576   // with respect to the min and max heap size.
  1577   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1578          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1579                  "maximum_desired_capacity = "SIZE_FORMAT,
  1580                  minimum_desired_capacity, maximum_desired_capacity));
  1582   // Should not be greater than the heap max size. No need to adjust
  1583   // it with respect to the heap min size as it's a lower bound (i.e.,
  1584   // we'll try to make the capacity larger than it, not smaller).
  1585   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1586   // Should not be less than the heap min size. No need to adjust it
  1587   // with respect to the heap max size as it's an upper bound (i.e.,
  1588   // we'll try to make the capacity smaller than it, not greater).
  1589   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
  1591   if (capacity_after_gc < minimum_desired_capacity) {
  1592     // Don't expand unless it's significant
  1593     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1594     ergo_verbose4(ErgoHeapSizing,
  1595                   "attempt heap expansion",
  1596                   ergo_format_reason("capacity lower than "
  1597                                      "min desired capacity after Full GC")
  1598                   ergo_format_byte("capacity")
  1599                   ergo_format_byte("occupancy")
  1600                   ergo_format_byte_perc("min desired capacity"),
  1601                   capacity_after_gc, used_after_gc,
  1602                   minimum_desired_capacity, (double) MinHeapFreeRatio);
  1603     expand(expand_bytes);
  1605     // No expansion, now see if we want to shrink
  1606   } else if (capacity_after_gc > maximum_desired_capacity) {
  1607     // Capacity too large, compute shrinking size
  1608     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1609     ergo_verbose4(ErgoHeapSizing,
  1610                   "attempt heap shrinking",
  1611                   ergo_format_reason("capacity higher than "
  1612                                      "max desired capacity after Full GC")
  1613                   ergo_format_byte("capacity")
  1614                   ergo_format_byte("occupancy")
  1615                   ergo_format_byte_perc("max desired capacity"),
  1616                   capacity_after_gc, used_after_gc,
  1617                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
  1618     shrink(shrink_bytes);
  1623 HeapWord*
  1624 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1625                                            bool* succeeded) {
  1626   assert_at_safepoint(true /* should_be_vm_thread */);
  1628   *succeeded = true;
  1629   // Let's attempt the allocation first.
  1630   HeapWord* result =
  1631     attempt_allocation_at_safepoint(word_size,
  1632                                  false /* expect_null_mutator_alloc_region */);
  1633   if (result != NULL) {
  1634     assert(*succeeded, "sanity");
  1635     return result;
  1638   // In a G1 heap, we're supposed to keep allocation from failing by
  1639   // incremental pauses.  Therefore, at least for now, we'll favor
  1640   // expansion over collection.  (This might change in the future if we can
  1641   // do something smarter than full collection to satisfy a failed alloc.)
  1642   result = expand_and_allocate(word_size);
  1643   if (result != NULL) {
  1644     assert(*succeeded, "sanity");
  1645     return result;
  1648   // Expansion didn't work, we'll try to do a Full GC.
  1649   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1650                                     false, /* clear_all_soft_refs */
  1651                                     word_size);
  1652   if (!gc_succeeded) {
  1653     *succeeded = false;
  1654     return NULL;
  1657   // Retry the allocation
  1658   result = attempt_allocation_at_safepoint(word_size,
  1659                                   true /* expect_null_mutator_alloc_region */);
  1660   if (result != NULL) {
  1661     assert(*succeeded, "sanity");
  1662     return result;
  1665   // Then, try a Full GC that will collect all soft references.
  1666   gc_succeeded = do_collection(false, /* explicit_gc */
  1667                                true,  /* clear_all_soft_refs */
  1668                                word_size);
  1669   if (!gc_succeeded) {
  1670     *succeeded = false;
  1671     return NULL;
  1674   // Retry the allocation once more
  1675   result = attempt_allocation_at_safepoint(word_size,
  1676                                   true /* expect_null_mutator_alloc_region */);
  1677   if (result != NULL) {
  1678     assert(*succeeded, "sanity");
  1679     return result;
  1682   assert(!collector_policy()->should_clear_all_soft_refs(),
  1683          "Flag should have been handled and cleared prior to this point");
  1685   // What else?  We might try synchronous finalization later.  If the total
  1686   // space available is large enough for the allocation, then a more
  1687   // complete compaction phase than we've tried so far might be
  1688   // appropriate.
  1689   assert(*succeeded, "sanity");
  1690   return NULL;
  1693 // Attempts to expand the heap sufficiently to support an
  1694 // allocation of the given "word_size". If successful, performs
  1695 // the allocation and returns the address of the allocated block;
  1696 // otherwise returns "NULL".
  1698 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1699   assert_at_safepoint(true /* should_be_vm_thread */);
  1701   verify_region_sets_optional();
  1703   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
  1704   ergo_verbose1(ErgoHeapSizing,
  1705                 "attempt heap expansion",
  1706                 ergo_format_reason("allocation request failed")
  1707                 ergo_format_byte("allocation request"),
  1708                 word_size * HeapWordSize);
  1709   if (expand(expand_bytes)) {
  1710     _hrs.verify_optional();
  1711     verify_region_sets_optional();
  1712     return attempt_allocation_at_safepoint(word_size,
  1713                                  false /* expect_null_mutator_alloc_region */);
  1715   return NULL;
  1718 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
  1719                                              HeapWord* new_end) {
  1720   assert(old_end != new_end, "don't call this otherwise");
  1721   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
  1723   // Update the committed mem region.
  1724   _g1_committed.set_end(new_end);
  1725   // Tell the card table about the update.
  1726   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1727   // Tell the BOT about the update.
  1728   _bot_shared->resize(_g1_committed.word_size());
  1731 bool G1CollectedHeap::expand(size_t expand_bytes) {
  1732   size_t old_mem_size = _g1_storage.committed_size();
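         // The requested expansion is first rounded up to the page size of the
         // underlying storage and then to a whole number of heap regions. For
         // example (illustrative sizes only): with 4K pages and 1M regions, a
         // 600K request becomes a single 1M region.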
  1733   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  1734   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1735                                        HeapRegion::GrainBytes);
  1736   ergo_verbose2(ErgoHeapSizing,
  1737                 "expand the heap",
  1738                 ergo_format_byte("requested expansion amount")
  1739                 ergo_format_byte("attempted expansion amount"),
  1740                 expand_bytes, aligned_expand_bytes);
  1742   // First commit the memory.
  1743   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1744   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  1745   if (successful) {
  1746     // Then propagate this update to the necessary data structures.
  1747     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1748     update_committed_space(old_end, new_end);
  1750     FreeRegionList expansion_list("Local Expansion List");
  1751     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
  1752     assert(mr.start() == old_end, "post-condition");
  1753     // mr might be a smaller region than what was requested if
  1754     // expand_by() was unable to allocate the HeapRegion instances
  1755     assert(mr.end() <= new_end, "post-condition");
  1757     size_t actual_expand_bytes = mr.byte_size();
  1758     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
  1759     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
  1760            "post-condition");
  1761     if (actual_expand_bytes < aligned_expand_bytes) {
  1762       // We could not expand _hrs to the desired size. In this case we
  1763       // need to shrink the committed space accordingly.
  1764       assert(mr.end() < new_end, "invariant");
  1766       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
  1767       // First uncommit the memory.
  1768       _g1_storage.shrink_by(diff_bytes);
  1769       // Then propagate this update to the necessary data structures.
  1770       update_committed_space(new_end, mr.end());
  1772     _free_list.add_as_tail(&expansion_list);
  1774     if (_hr_printer.is_active()) {
  1775       HeapWord* curr = mr.start();
  1776       while (curr < mr.end()) {
  1777         HeapWord* curr_end = curr + HeapRegion::GrainWords;
  1778         _hr_printer.commit(curr, curr_end);
  1779         curr = curr_end;
  1781       assert(curr == mr.end(), "post-condition");
  1783     g1_policy()->record_new_heap_size(n_regions());
  1784   } else {
  1785     ergo_verbose0(ErgoHeapSizing,
  1786                   "did not expand the heap",
  1787                   ergo_format_reason("heap expansion operation failed"));
  1788     // The expansion of the virtual storage space was unsuccessful.
  1789     // Let's see if it was because we ran out of swap.
  1790     if (G1ExitOnExpansionFailure &&
  1791         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
  1792       // We had head room...
  1793       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
  1796   return successful;
  1799 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  1800   size_t old_mem_size = _g1_storage.committed_size();
  1801   size_t aligned_shrink_bytes =
  1802     ReservedSpace::page_align_size_down(shrink_bytes);
  1803   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1804                                          HeapRegion::GrainBytes);
  1805   uint num_regions_deleted = 0;
  1806   MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
  1807   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1808   assert(mr.end() == old_end, "post-condition");
  1810   ergo_verbose3(ErgoHeapSizing,
  1811                 "shrink the heap",
  1812                 ergo_format_byte("requested shrinking amount")
  1813                 ergo_format_byte("aligned shrinking amount")
  1814                 ergo_format_byte("attempted shrinking amount"),
  1815                 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
  1816   if (mr.byte_size() > 0) {
  1817     if (_hr_printer.is_active()) {
  1818       HeapWord* curr = mr.end();
  1819       while (curr > mr.start()) {
  1820         HeapWord* curr_end = curr;
  1821         curr -= HeapRegion::GrainWords;
  1822         _hr_printer.uncommit(curr, curr_end);
  1824       assert(curr == mr.start(), "post-condition");
  1827     _g1_storage.shrink_by(mr.byte_size());
  1828     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1829     assert(mr.start() == new_end, "post-condition");
  1831     _expansion_regions += num_regions_deleted;
  1832     update_committed_space(old_end, new_end);
  1833     HeapRegionRemSet::shrink_heap(n_regions());
  1834     g1_policy()->record_new_heap_size(n_regions());
  1835   } else {
  1836     ergo_verbose0(ErgoHeapSizing,
  1837                   "did not shrink the heap",
  1838                   ergo_format_reason("heap shrinking operation failed"));
  1842 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1843   verify_region_sets_optional();
  1845   // We should only reach here at the end of a Full GC which means we
  1846   // should not be holding on to any GC alloc regions. The method
  1847   // below will make sure of that and do any remaining clean up.
  1848   abandon_gc_alloc_regions();
  1850   // Instead of tearing down / rebuilding the free lists here, we
  1851   // could use the remove_all_pending() method on free_list to
  1852   // remove only the ones that we need to remove.
  1853   tear_down_region_sets(true /* free_list_only */);
  1854   shrink_helper(shrink_bytes);
  1855   rebuild_region_sets(true /* free_list_only */);
  1857   _hrs.verify_optional();
  1858   verify_region_sets_optional();
  1861 // Public methods.
  1863 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1864 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1865 #endif // _MSC_VER
  1868 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1869   SharedHeap(policy_),
  1870   _g1_policy(policy_),
  1871   _dirty_card_queue_set(false),
  1872   _into_cset_dirty_card_queue_set(false),
  1873   _is_alive_closure_cm(this),
  1874   _is_alive_closure_stw(this),
  1875   _ref_processor_cm(NULL),
  1876   _ref_processor_stw(NULL),
  1877   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1878   _bot_shared(NULL),
  1879   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1880   _evac_failure_scan_stack(NULL) ,
  1881   _mark_in_progress(false),
  1882   _cg1r(NULL), _summary_bytes_used(0),
  1883   _g1mm(NULL),
  1884   _refine_cte_cl(NULL),
  1885   _full_collection(false),
  1886   _free_list("Master Free List"),
  1887   _secondary_free_list("Secondary Free List"),
  1888   _old_set("Old Set"),
  1889   _humongous_set("Master Humongous Set"),
  1890   _free_regions_coming(false),
  1891   _young_list(new YoungList(this)),
  1892   _gc_time_stamp(0),
  1893   _retained_old_gc_alloc_region(NULL),
  1894   _survivor_plab_stats(YoungPLABSize, PLABWeight),
  1895   _old_plab_stats(OldPLABSize, PLABWeight),
  1896   _expand_heap_after_alloc_failure(true),
  1897   _surviving_young_words(NULL),
  1898   _old_marking_cycles_started(0),
  1899   _old_marking_cycles_completed(0),
  1900   _in_cset_fast_test(NULL),
  1901   _in_cset_fast_test_base(NULL),
  1902   _dirty_cards_region_list(NULL),
  1903   _worker_cset_start_region(NULL),
  1904   _worker_cset_start_region_time_stamp(NULL) {
  1905   _g1h = this; // To catch bugs.
  1906   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1907     vm_exit_during_initialization("Failed necessary allocation.");
  1910   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
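         // i.e. any allocation of at least half a heap region is treated as
         // humongous and will take the humongous allocation path.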
  1912   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1913   _task_queues = new RefToScanQueueSet(n_queues);
  1915   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1916   assert(n_rem_sets > 0, "Invariant.");
  1918   HeapRegionRemSetIterator** iter_arr =
  1919     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
  1920   for (int i = 0; i < n_queues; i++) {
  1921     iter_arr[i] = new HeapRegionRemSetIterator();
  1923   _rem_set_iterator = iter_arr;
  1925   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  1926   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
  1928   for (int i = 0; i < n_queues; i++) {
  1929     RefToScanQueue* q = new RefToScanQueue();
  1930     q->initialize();
  1931     _task_queues->register_queue(i, q);
  1934   clear_cset_start_regions();
  1936   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1937 #ifdef SPARC
  1938   // Issue a stern warning, but allow use for experimentation and debugging.
  1939   if (VM_Version::is_sun4v() && UseMemSetInBOT) {
  1940     assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
  1941     warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
  1942             " on sun4v; please understand that you are using at your own risk!");
  1944 #endif
  1947 jint G1CollectedHeap::initialize() {
  1948   CollectedHeap::pre_initialize();
  1949   os::enable_vtime();
  1951   G1Log::init();
  1953   // Necessary to satisfy locking discipline assertions.
  1955   MutexLocker x(Heap_lock);
  1957   // We have to initialize the printer before committing the heap, as
  1958   // it will be used then.
  1959   _hr_printer.set_active(G1PrintHeapRegions);
  1961   // While there are no constraints in the GC code that HeapWordSize
  1962   // be any particular value, there are multiple other areas in the
  1963   // system which believe this to be true (e.g. oop->object_size in some
  1964   // cases incorrectly returns the size in wordSize units rather than
  1965   // HeapWordSize).
  1966   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1968   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1969   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1971   // Ensure that the sizes are properly aligned.
  1972   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1973   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1975   _cg1r = new ConcurrentG1Refine();
  1977   // Reserve the maximum.
  1978   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1979   // Includes the perm-gen.
  1981   // When compressed oops are enabled, the preferred heap base
  1982   // is calculated by subtracting the requested size from the
  1983   // 32Gb boundary and using the result as the base address for
  1984   // heap reservation. If the requested size is not aligned to
  1985   // HeapRegion::GrainBytes (i.e. the alignment that is passed
  1986   // into the ReservedHeapSpace constructor) then the actual
  1987   // base of the reserved heap may end up differing from the
  1988   // address that was requested (i.e. the preferred heap base).
  1989   // If this happens then we could end up using a non-optimal
  1990   // compressed oops mode.
  1992   // Since max_byte_size is aligned to the size of a heap region (checked
  1993   // above), we also need to align the perm gen size as it might not be.
  1994   const size_t total_reserved = max_byte_size +
  1995                                 align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
  1996   Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
  1998   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  2000   ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
  2001                             UseLargePages, addr);
  2003   if (UseCompressedOops) {
  2004     if (addr != NULL && !heap_rs.is_reserved()) {
  2005       // Failed to reserve at specified address - the requested memory
  2006       // region is taken already, for example, by 'java' launcher.
  2007       // Try again to reserve the heap higher.
  2008       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  2010       ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  2011                                  UseLargePages, addr);
  2013       if (addr != NULL && !heap_rs0.is_reserved()) {
  2014         // Failed to reserve at specified address again - give up.
  2015         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  2016         assert(addr == NULL, "");
  2018         ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  2019                                    UseLargePages, addr);
  2020         heap_rs = heap_rs1;
  2021       } else {
  2022         heap_rs = heap_rs0;
  2027   if (!heap_rs.is_reserved()) {
  2028     vm_exit_during_initialization("Could not reserve enough space for object heap");
  2029     return JNI_ENOMEM;
  2032   // It is important to do this in a way such that concurrent readers can't
  2033   // temporarily think something is in the heap.  (I've actually seen this
  2034   // happen in asserts: DLD.)
  2035   _reserved.set_word_size(0);
  2036   _reserved.set_start((HeapWord*)heap_rs.base());
  2037   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  2039   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
  2041   // Create the gen rem set (and barrier set) for the entire reserved region.
  2042   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  2043   set_barrier_set(rem_set()->bs());
  2044   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  2045     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  2046   } else {
  2047     vm_exit_during_initialization("G1 requires a mod ref bs.");
  2048     return JNI_ENOMEM;
  2051   // Also create a G1 rem set.
  2052   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  2053     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  2054   } else {
  2055     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  2056     return JNI_ENOMEM;
  2059   // Carve out the G1 part of the heap.
  2061   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  2062   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  2063                            g1_rs.size()/HeapWordSize);
  2064   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  2066   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  2068   _g1_storage.initialize(g1_rs, 0);
  2069   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  2070   _hrs.initialize((HeapWord*) _g1_reserved.start(),
  2071                   (HeapWord*) _g1_reserved.end(),
  2072                   _expansion_regions);
  2074   // 6843694 - ensure that the maximum region index can fit
  2075   // in the remembered set structures.
  2076   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
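         // (The "- 1" in the shift assumes RegionIdx_t is a signed type, so one
         // bit is reserved for the sign and the rest can hold the region index.)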
  2077   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  2079   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  2080   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  2081   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
  2082             "too many cards per region");
  2084   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
  2086   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  2087                                              heap_word_size(init_byte_size));
  2089   _g1h = this;
  2091    _in_cset_fast_test_length = max_regions();
  2092    _in_cset_fast_test_base =
  2093                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
  2095    // We're biasing _in_cset_fast_test to avoid subtracting the
  2096    // beginning of the heap every time we want to index; basically
  2097    // it's the same as what we do with the card table.
  2098    _in_cset_fast_test = _in_cset_fast_test_base -
  2099                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
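          // Sketch of the intended lookup (illustrative only, not the actual
          // accessor):
          //   bool in_cset = _in_cset_fast_test[((uintx) addr) >> HeapRegion::LogOfHRGrainBytes];
          // i.e. the biased base lets us index by the raw address without first
          // subtracting the start of the reserved heap.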
  2101    // Clear the _cset_fast_test bitmap in anticipation of adding
  2102    // regions to the incremental collection set for the first
  2103    // evacuation pause.
  2104    clear_cset_fast_test();
  2106   // Create the ConcurrentMark data structure and thread.
  2107   // (Must do this late, so that "max_regions" is defined.)
  2108   _cm       = new ConcurrentMark(heap_rs, max_regions());
  2109   _cmThread = _cm->cmThread();
  2111   // Initialize the from_card cache structure of HeapRegionRemSet.
  2112   HeapRegionRemSet::init_heap(max_regions());
  2114   // Now expand into the initial heap size.
  2115   if (!expand(init_byte_size)) {
  2116     vm_exit_during_initialization("Failed to allocate initial heap.");
  2117     return JNI_ENOMEM;
  2120   // Perform any initialization actions delegated to the policy.
  2121   g1_policy()->init();
  2123   _refine_cte_cl =
  2124     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  2125                                     g1_rem_set(),
  2126                                     concurrent_g1_refine());
  2127   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  2129   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  2130                                                SATB_Q_FL_lock,
  2131                                                G1SATBProcessCompletedThreshold,
  2132                                                Shared_SATB_Q_lock);
  2134   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2135                                                 DirtyCardQ_FL_lock,
  2136                                                 concurrent_g1_refine()->yellow_zone(),
  2137                                                 concurrent_g1_refine()->red_zone(),
  2138                                                 Shared_DirtyCardQ_lock);
  2140   if (G1DeferredRSUpdate) {
  2141     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2142                                       DirtyCardQ_FL_lock,
  2143                                       -1, // never trigger processing
  2144                                       -1, // no limit on length
  2145                                       Shared_DirtyCardQ_lock,
  2146                                       &JavaThread::dirty_card_queue_set());
  2149   // Initialize the card queue set used to hold cards containing
  2150   // references into the collection set.
  2151   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
  2152                                              DirtyCardQ_FL_lock,
  2153                                              -1, // never trigger processing
  2154                                              -1, // no limit on length
  2155                                              Shared_DirtyCardQ_lock,
  2156                                              &JavaThread::dirty_card_queue_set());
  2158   // In case we're keeping closure specialization stats, initialize those
  2159   // counts and that mechanism.
  2160   SpecializationStats::clear();
  2162   // Do later initialization work for concurrent refinement.
  2163   _cg1r->init();
  2165   // Here we allocate the dummy full region that is required by the
  2166   // G1AllocRegion class. If we don't pass an address in the reserved
  2167   // space here, lots of asserts fire.
  2169   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
  2170                                              _g1_reserved.start());
  2171   // We'll re-use the same region whether the alloc region will
  2172   // require BOT updates or not and, if it doesn't, then a non-young
  2173   // region will complain that it cannot support allocations without
  2174   // BOT updates. So we'll tag the dummy region as young to avoid that.
  2175   dummy_region->set_young();
  2176   // Make sure it's full.
  2177   dummy_region->set_top(dummy_region->end());
  2178   G1AllocRegion::setup(this, dummy_region);
  2180   init_mutator_alloc_region();
  2182   // Create the monitoring and management support now so that
  2183   // values in the heap have been properly initialized.
  2184   _g1mm = new G1MonitoringSupport(this);
  2186   return JNI_OK;
  2189 void G1CollectedHeap::ref_processing_init() {
  2190   // Reference processing in G1 currently works as follows:
  2191   //
  2192   // * There are two reference processor instances. One is
  2193   //   used to record and process discovered references
  2194   //   during concurrent marking; the other is used to
  2195   //   record and process references during STW pauses
  2196   //   (both full and incremental).
  2197   // * Both ref processors need to 'span' the entire heap as
  2198   //   the regions in the collection set may be dotted around.
  2199   //
  2200   // * For the concurrent marking ref processor:
  2201   //   * Reference discovery is enabled at initial marking.
  2202   //   * Reference discovery is disabled and the discovered
  2203   //     references processed etc during remarking.
  2204   //   * Reference discovery is MT (see below).
  2205   //   * Reference discovery requires a barrier (see below).
  2206   //   * Reference processing may or may not be MT
  2207   //     (depending on the value of ParallelRefProcEnabled
  2208   //     and ParallelGCThreads).
  2209   //   * A full GC disables reference discovery by the CM
  2210   //     ref processor and abandons any entries on its
  2211   //     discovered lists.
  2212   //
  2213   // * For the STW processor:
  2214   //   * Non MT discovery is enabled at the start of a full GC.
  2215   //   * Processing and enqueueing during a full GC is non-MT.
  2216   //   * During a full GC, references are processed after marking.
  2217   //
  2218   //   * Discovery (may or may not be MT) is enabled at the start
  2219   //     of an incremental evacuation pause.
  2220   //   * References are processed near the end of a STW evacuation pause.
  2221   //   * For both types of GC:
  2222   //     * Discovery is atomic - i.e. not concurrent.
  2223   //     * Reference discovery will not need a barrier.
  2225   SharedHeap::ref_processing_init();
  2226   MemRegion mr = reserved_region();
  2228   // Concurrent Mark ref processor
  2229   _ref_processor_cm =
  2230     new ReferenceProcessor(mr,    // span
  2231                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2232                                 // mt processing
  2233                            (int) ParallelGCThreads,
  2234                                 // degree of mt processing
  2235                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
  2236                                 // mt discovery
  2237                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
  2238                                 // degree of mt discovery
  2239                            false,
  2240                                 // Reference discovery is not atomic
  2241                            &_is_alive_closure_cm,
  2242                                 // is alive closure
  2243                                 // (for efficiency/performance)
  2244                            true);
  2245                                 // Setting next fields of discovered
  2246                                 // lists requires a barrier.
  2248   // STW ref processor
  2249   _ref_processor_stw =
  2250     new ReferenceProcessor(mr,    // span
  2251                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2252                                 // mt processing
  2253                            MAX2((int)ParallelGCThreads, 1),
  2254                                 // degree of mt processing
  2255                            (ParallelGCThreads > 1),
  2256                                 // mt discovery
  2257                            MAX2((int)ParallelGCThreads, 1),
  2258                                 // degree of mt discovery
  2259                            true,
  2260                                 // Reference discovery is atomic
  2261                            &_is_alive_closure_stw,
  2262                                 // is alive closure
  2263                                 // (for efficiency/performance)
  2264                            false);
  2265                                 // Setting next fields of discovered
  2266                                 // lists does not require a barrier.
  2269 size_t G1CollectedHeap::capacity() const {
  2270   return _g1_committed.byte_size();
  2273 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  2274   assert(!hr->continuesHumongous(), "pre-condition");
  2275   hr->reset_gc_time_stamp();
  2276   if (hr->startsHumongous()) {
  2277     uint first_index = hr->hrs_index() + 1;
  2278     uint last_index = hr->last_hc_index();
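           // last_hc_index() is taken here to be one past the last "continues
           // humongous" region of this series, so the loop below covers exactly
           // the regions that follow this "starts humongous" region.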
  2279     for (uint i = first_index; i < last_index; i += 1) {
  2280       HeapRegion* chr = region_at(i);
  2281       assert(chr->continuesHumongous(), "sanity");
  2282       chr->reset_gc_time_stamp();
  2287 #ifndef PRODUCT
  2288 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
  2289 private:
  2290   unsigned _gc_time_stamp;
  2291   bool _failures;
  2293 public:
  2294   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
  2295     _gc_time_stamp(gc_time_stamp), _failures(false) { }
  2297   virtual bool doHeapRegion(HeapRegion* hr) {
  2298     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
  2299     if (_gc_time_stamp != region_gc_time_stamp) {
  2300       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
  2301                              "expected %d", HR_FORMAT_PARAMS(hr),
  2302                              region_gc_time_stamp, _gc_time_stamp);
  2303       _failures = true;
  2305     return false;
  2308   bool failures() { return _failures; }
  2309 };
  2311 void G1CollectedHeap::check_gc_time_stamps() {
  2312   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  2313   heap_region_iterate(&cl);
  2314   guarantee(!cl.failures(), "all GC time stamps should have been reset");
  2316 #endif // PRODUCT
  2318 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2319                                                  DirtyCardQueue* into_cset_dcq,
  2320                                                  bool concurrent,
  2321                                                  int worker_i) {
  2322   // Clean cards in the hot card cache
  2323   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
  2325   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2326   int n_completed_buffers = 0;
  2327   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2328     n_completed_buffers++;
  2330   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i,
  2331                                                   (double) n_completed_buffers);
  2332   dcqs.clear_n_completed_buffers();
  2333   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2337 // Computes the sum of the storage used by the various regions.
  2339 size_t G1CollectedHeap::used() const {
  2340   assert(Heap_lock->owner() != NULL,
  2341          "Should be owned on this thread's behalf.");
  2342   size_t result = _summary_bytes_used;
  2343   // Read only once in case it is set to NULL concurrently
  2344   HeapRegion* hr = _mutator_alloc_region.get();
  2345   if (hr != NULL)
  2346     result += hr->used();
  2347   return result;
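       // Unlike used(), the variant below does not require the Heap_lock and does
       // not add in the space consumed so far in the current mutator alloc region,
       // so it may slightly under-report compared to used().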
  2350 size_t G1CollectedHeap::used_unlocked() const {
  2351   size_t result = _summary_bytes_used;
  2352   return result;
  2355 class SumUsedClosure: public HeapRegionClosure {
  2356   size_t _used;
  2357 public:
  2358   SumUsedClosure() : _used(0) {}
  2359   bool doHeapRegion(HeapRegion* r) {
  2360     if (!r->continuesHumongous()) {
  2361       _used += r->used();
  2363     return false;
  2365   size_t result() { return _used; }
  2366 };
  2368 size_t G1CollectedHeap::recalculate_used() const {
  2369   SumUsedClosure blk;
  2370   heap_region_iterate(&blk);
  2371   return blk.result();
  2374 size_t G1CollectedHeap::unsafe_max_alloc() {
  2375   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2376   // otherwise, is there space in the current allocation region?
  2378   // We need to store the current allocation region in a local variable
  2379   // here. The problem is that this method doesn't take any locks and
  2380   // there may be other threads which overwrite the current allocation
  2381   // region field. attempt_allocation(), for example, sets it to NULL
  2382   // and this can happen *after* the NULL check here but before the call
  2383   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2384   // to be a problem in the optimized build, since the two loads of the
  2385   // current allocation region field are optimized away.
  2386   HeapRegion* hr = _mutator_alloc_region.get();
  2387   if (hr == NULL) {
  2388     return 0;
  2390   return hr->free();
  2393 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  2394   switch (cause) {
  2395     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
  2396     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  2397     case GCCause::_g1_humongous_allocation: return true;
  2398     default:                                return false;
  2402 #ifndef PRODUCT
  2403 void G1CollectedHeap::allocate_dummy_regions() {
  2404   // Let's fill up most of the region
  2405   size_t word_size = HeapRegion::GrainWords - 1024;
  2406   // And as a result the region we'll allocate will be humongous.
  2407   guarantee(isHumongous(word_size), "sanity");
  2409   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
  2410     // Let's use the existing mechanism for the allocation
  2411     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
  2412     if (dummy_obj != NULL) {
  2413       MemRegion mr(dummy_obj, word_size);
  2414       CollectedHeap::fill_with_object(mr);
  2415     } else {
  2416       // If we can't allocate once, we probably cannot allocate
  2417       // again. Let's get out of the loop.
  2418       break;
  2422 #endif // !PRODUCT
  2424 void G1CollectedHeap::increment_old_marking_cycles_started() {
  2425   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
  2426     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
  2427     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
  2428     _old_marking_cycles_started, _old_marking_cycles_completed));
  2430   _old_marking_cycles_started++;
  2433 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  2434   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2436   // We assume that if concurrent == true, then the caller is a
  2437   // concurrent thread that has joined the Suspendible Thread
  2438   // Set. If there's ever a cheap way to check this, we should add an
  2439   // assert here.
  2441   // Given that this method is called at the end of a Full GC or of a
  2442   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2443   // interrupt a concurrent cycle), the number of full collections
  2444   // completed should be either one (in the case where there was no
  2445   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2446   // behind the number of full collections started.
  2448   // This is the case for the inner caller, i.e. a Full GC.
  2449   assert(concurrent ||
  2450          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
  2451          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
  2452          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
  2453                  "is inconsistent with _old_marking_cycles_completed = %u",
  2454                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2456   // This is the case for the outer caller, i.e. the concurrent cycle.
  2457   assert(!concurrent ||
  2458          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
  2459          err_msg("for outer caller (concurrent cycle): "
  2460                  "_old_marking_cycles_started = %u "
  2461                  "is inconsistent with _old_marking_cycles_completed = %u",
  2462                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2464   _old_marking_cycles_completed += 1;
  2466   // We need to clear the "in_progress" flag in the CM thread before
  2467   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  2468   // is set) so that if a waiter requests another System.gc() it doesn't
  2469   // incorrectly see that a marking cycle is still in progress.
  2470   if (concurrent) {
  2471     _cmThread->clear_in_progress();
  2474   // This notify_all() will ensure that a thread that called
  2475   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
  2476   // and is waiting for a full GC to finish will be woken up. It is
  2477   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2478   FullGCCount_lock->notify_all();
  2481 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  2482   assert_at_safepoint(true /* should_be_vm_thread */);
  2483   GCCauseSetter gcs(this, cause);
  2484   switch (cause) {
  2485     case GCCause::_heap_inspection:
  2486     case GCCause::_heap_dump: {
  2487       HandleMark hm;
  2488       do_full_collection(false);         // don't clear all soft refs
  2489       break;
  2491     default: // XXX FIX ME
  2492       ShouldNotReachHere(); // Unexpected use of this function
  2496 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2497   assert_heap_not_locked();
  2499   unsigned int gc_count_before;
  2500   unsigned int old_marking_count_before;
  2501   bool retry_gc;
  2503   do {
  2504     retry_gc = false;
  2507       MutexLocker ml(Heap_lock);
  2509       // Read the GC count while holding the Heap_lock
  2510       gc_count_before = total_collections();
  2511       old_marking_count_before = _old_marking_cycles_started;
  2514     if (should_do_concurrent_full_gc(cause)) {
  2515       // Schedule an initial-mark evacuation pause that will start a
  2516       // concurrent cycle. We're setting word_size to 0 which means that
  2517       // we are not requesting a post-GC allocation.
  2518       VM_G1IncCollectionPause op(gc_count_before,
  2519                                  0,     /* word_size */
  2520                                  true,  /* should_initiate_conc_mark */
  2521                                  g1_policy()->max_pause_time_ms(),
  2522                                  cause);
  2524       VMThread::execute(&op);
  2525       if (!op.pause_succeeded()) {
  2526         if (old_marking_count_before == _old_marking_cycles_started) {
  2527           retry_gc = op.should_retry_gc();
  2528         } else {
  2529           // A Full GC happened while we were trying to schedule the
  2530           // initial-mark GC. No point in starting a new cycle given
  2531           // that the whole heap was collected anyway.
  2534         if (retry_gc) {
  2535           if (GC_locker::is_active_and_needs_gc()) {
  2536             GC_locker::stall_until_clear();
  2540     } else {
  2541       if (cause == GCCause::_gc_locker
  2542           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2544         // Schedule a standard evacuation pause. We're setting word_size
  2545         // to 0 which means that we are not requesting a post-GC allocation.
  2546         VM_G1IncCollectionPause op(gc_count_before,
  2547                                    0,     /* word_size */
  2548                                    false, /* should_initiate_conc_mark */
  2549                                    g1_policy()->max_pause_time_ms(),
  2550                                    cause);
  2551         VMThread::execute(&op);
  2552       } else {
  2553         // Schedule a Full GC.
  2554         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
  2555         VMThread::execute(&op);
  2558   } while (retry_gc);
  2561 bool G1CollectedHeap::is_in(const void* p) const {
  2562   if (_g1_committed.contains(p)) {
  2563     // Given that we know that p is in the committed space,
  2564     // heap_region_containing_raw() should successfully
  2565     // return the containing region.
  2566     HeapRegion* hr = heap_region_containing_raw(p);
  2567     return hr->is_in(p);
  2568   } else {
  2569     return _perm_gen->as_gen()->is_in(p);
  2573 // Iteration functions.
  2575 // Iterates an OopClosure over all ref-containing fields of objects
  2576 // within a HeapRegion.
  2578 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2579   MemRegion _mr;
  2580   OopClosure* _cl;
  2581 public:
  2582   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  2583     : _mr(mr), _cl(cl) {}
  2584   bool doHeapRegion(HeapRegion* r) {
  2585     if (!r->continuesHumongous()) {
  2586       r->oop_iterate(_cl);
  2588     return false;
  2590 };
  2592 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  2593   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2594   heap_region_iterate(&blk);
  2595   if (do_perm) {
  2596     perm_gen()->oop_iterate(cl);
  2600 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  2601   IterateOopClosureRegionClosure blk(mr, cl);
  2602   heap_region_iterate(&blk);
  2603   if (do_perm) {
  2604     perm_gen()->oop_iterate(cl);
  2608 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2610 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2611   ObjectClosure* _cl;
  2612 public:
  2613   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2614   bool doHeapRegion(HeapRegion* r) {
  2615     if (! r->continuesHumongous()) {
  2616       r->object_iterate(_cl);
  2618     return false;
  2620 };
  2622 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  2623   IterateObjectClosureRegionClosure blk(cl);
  2624   heap_region_iterate(&blk);
  2625   if (do_perm) {
  2626     perm_gen()->object_iterate(cl);
  2630 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  2631   // FIXME: is this right?
  2632   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  2635 // Calls a SpaceClosure on a HeapRegion.
  2637 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2638   SpaceClosure* _cl;
  2639 public:
  2640   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2641   bool doHeapRegion(HeapRegion* r) {
  2642     _cl->do_space(r);
  2643     return false;
  2645 };
  2647 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2648   SpaceClosureRegionClosure blk(cl);
  2649   heap_region_iterate(&blk);
  2652 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  2653   _hrs.iterate(cl);
  2656 void
  2657 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2658                                                  uint worker_id,
  2659                                                  uint no_of_par_workers,
  2660                                                  jint claim_value) {
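         // Overview: each worker gets a different starting region (see
         // start_region_for_worker()) and then walks the entire region table,
         // claiming unclaimed regions by installing claim_value via
         // claimHeapRegion(). "Continues humongous" regions are skipped here
         // and handled together with their "starts humongous" region below.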
  2661   const uint regions = n_regions();
  2662   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  2663                              no_of_par_workers :
  2664                              1);
  2665   assert(UseDynamicNumberOfGCThreads ||
  2666          no_of_par_workers == workers()->total_workers(),
  2667          "Non dynamic should use fixed number of workers");
  2668   // try to spread out the starting points of the workers
  2669   const HeapRegion* start_hr =
  2670                         start_region_for_worker(worker_id, no_of_par_workers);
  2671   const uint start_index = start_hr->hrs_index();
  2673   // each worker will actually look at all regions
  2674   for (uint count = 0; count < regions; ++count) {
  2675     const uint index = (start_index + count) % regions;
  2676     assert(0 <= index && index < regions, "sanity");
  2677     HeapRegion* r = region_at(index);
  2678     // we'll ignore "continues humongous" regions (we'll process them
  2679     // when we come across their corresponding "starts humongous"
  2680     // region) and regions already claimed
  2681     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2682       continue;
  2684     // OK, try to claim it
  2685     if (r->claimHeapRegion(claim_value)) {
  2686       // success!
  2687       assert(!r->continuesHumongous(), "sanity");
  2688       if (r->startsHumongous()) {
  2689         // If the region is "starts humongous", we'll iterate over its
  2690         // "continues humongous" regions first. The order is important:
  2691         // in one case, calling the closure on the "starts humongous"
  2692         // region might de-allocate and clear all its "continues
  2693         // humongous" regions and, as a result, we might end up
  2694         // processing them twice. So, we'll do them first (notice: most
  2695         // closures will ignore them anyway) and then we'll do the
  2696         // "starts humongous" region.
  2697         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  2698           HeapRegion* chr = region_at(ch_index);
  2700           // if the region has already been claimed or it's not
  2701           // "continues humongous" we're done
  2702           if (chr->claim_value() == claim_value ||
  2703               !chr->continuesHumongous()) {
  2704             break;
  2707           // No one should have claimed it directly, given that we
  2708           // claimed its "starts humongous" region.
  2709           assert(chr->claim_value() != claim_value, "sanity");
  2710           assert(chr->humongous_start_region() == r, "sanity");
  2712           if (chr->claimHeapRegion(claim_value)) {
  2713             // we should always be able to claim it; no one else should
  2714             // be trying to claim this region
  2716             bool res2 = cl->doHeapRegion(chr);
  2717             assert(!res2, "Should not abort");
  2719             // Right now, this holds (i.e., no closure that actually
  2720             // does something with "continues humongous" regions
  2721             // clears them). We might have to weaken it in the future,
  2722             // but let's leave these two asserts here for extra safety.
  2723             assert(chr->continuesHumongous(), "should still be the case");
  2724             assert(chr->humongous_start_region() == r, "sanity");
  2725           } else {
  2726             guarantee(false, "we should not reach here");
  2731       assert(!r->continuesHumongous(), "sanity");
  2732       bool res = cl->doHeapRegion(r);
  2733       assert(!res, "Should not abort");
  2738 class ResetClaimValuesClosure: public HeapRegionClosure {
  2739 public:
  2740   bool doHeapRegion(HeapRegion* r) {
  2741     r->set_claim_value(HeapRegion::InitialClaimValue);
  2742     return false;
  2744 };
  2746 void G1CollectedHeap::reset_heap_region_claim_values() {
  2747   ResetClaimValuesClosure blk;
  2748   heap_region_iterate(&blk);
  2751 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  2752   ResetClaimValuesClosure blk;
  2753   collection_set_iterate(&blk);
  2756 #ifdef ASSERT
  2757 // This checks whether all regions in the heap have the correct claim
  2758 // value. We also piggy-back a check that the
  2759 // humongous_start_region() information on "continues humongous"
  2760 // regions is correct.
  2762 class CheckClaimValuesClosure : public HeapRegionClosure {
  2763 private:
  2764   jint _claim_value;
  2765   uint _failures;
  2766   HeapRegion* _sh_region;
  2768 public:
  2769   CheckClaimValuesClosure(jint claim_value) :
  2770     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2771   bool doHeapRegion(HeapRegion* r) {
  2772     if (r->claim_value() != _claim_value) {
  2773       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2774                              "claim value = %d, should be %d",
  2775                              HR_FORMAT_PARAMS(r),
  2776                              r->claim_value(), _claim_value);
  2777       ++_failures;
  2779     if (!r->isHumongous()) {
  2780       _sh_region = NULL;
  2781     } else if (r->startsHumongous()) {
  2782       _sh_region = r;
  2783     } else if (r->continuesHumongous()) {
  2784       if (r->humongous_start_region() != _sh_region) {
  2785         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2786                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2787                                HR_FORMAT_PARAMS(r),
  2788                                r->humongous_start_region(),
  2789                                _sh_region);
  2790         ++_failures;
  2793     return false;
  2795   uint failures() { return _failures; }
  2796 };
  2798 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2799   CheckClaimValuesClosure cl(claim_value);
  2800   heap_region_iterate(&cl);
  2801   return cl.failures() == 0;
  2804 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
  2805 private:
  2806   jint _claim_value;
  2807   uint _failures;
  2809 public:
  2810   CheckClaimValuesInCSetHRClosure(jint claim_value) :
  2811     _claim_value(claim_value), _failures(0) { }
  2813   uint failures() { return _failures; }
  2815   bool doHeapRegion(HeapRegion* hr) {
  2816     assert(hr->in_collection_set(), "how?");
  2817     assert(!hr->isHumongous(), "H-region in CSet");
  2818     if (hr->claim_value() != _claim_value) {
  2819       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
  2820                              "claim value = %d, should be %d",
  2821                              HR_FORMAT_PARAMS(hr),
  2822                              hr->claim_value(), _claim_value);
  2823       _failures += 1;
  2825     return false;
  2827 };
  2829 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
  2830   CheckClaimValuesInCSetHRClosure cl(claim_value);
  2831   collection_set_iterate(&cl);
  2832   return cl.failures() == 0;
  2834 #endif // ASSERT
  2836 // Clear the cached CSet starting regions and (more importantly)
  2837 // the time stamps. Called when we reset the GC time stamp.
  2838 void G1CollectedHeap::clear_cset_start_regions() {
  2839   assert(_worker_cset_start_region != NULL, "sanity");
  2840   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
  2842   int n_queues = MAX2((int)ParallelGCThreads, 1);
  2843   for (int i = 0; i < n_queues; i++) {
  2844     _worker_cset_start_region[i] = NULL;
  2845     _worker_cset_start_region_time_stamp[i] = 0;
  2849 // Given the id of a worker, obtain or calculate a suitable
  2850 // starting region for iterating over the current collection set.
  2851 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
  2852   assert(get_gc_time_stamp() > 0, "should have been updated by now");
  2854   HeapRegion* result = NULL;
  2855   unsigned gc_time_stamp = get_gc_time_stamp();
  2857   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
  2858     // Cached starting region for current worker was set
  2859     // during the current pause - so it's valid.
  2860     // Note: the cached starting heap region may be NULL
  2861     // (when the collection set is empty).
  2862     result = _worker_cset_start_region[worker_i];
  2863     assert(result == NULL || result->in_collection_set(), "sanity");
  2864     return result;
  2867   // The cached entry was not valid so let's calculate
  2868   // a suitable starting heap region for this worker.
  2870   // We want the parallel threads to start their collection
  2871   // set iteration at different collection set regions to
  2872   // avoid contention.
  2873   // If we have:
  2874   //          n collection set regions
  2875   //          p threads
  2876   // Then thread t will start at region floor ((t * n) / p)
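         // For example (illustrative values): with n = 6 CSet regions and
         // p = 4 threads, the starting indices are floor(0*6/4) = 0,
         // floor(1*6/4) = 1, floor(2*6/4) = 3 and floor(3*6/4) = 4.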
  2878   result = g1_policy()->collection_set();
  2879   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2880     uint cs_size = g1_policy()->cset_region_length();
  2881     uint active_workers = workers()->active_workers();
  2882     assert(UseDynamicNumberOfGCThreads ||
  2883              active_workers == workers()->total_workers(),
  2884              "Unless dynamic should use total workers");
  2886     uint end_ind   = (cs_size * worker_i) / active_workers;
  2887     uint start_ind = 0;
  2889     if (worker_i > 0 &&
  2890         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
  2891       // Previous worker's starting region is valid
  2892       // so let's iterate from there
  2893       start_ind = (cs_size * (worker_i - 1)) / active_workers;
  2894       result = _worker_cset_start_region[worker_i - 1];
  2897     for (uint i = start_ind; i < end_ind; i++) {
  2898       result = result->next_in_collection_set();
  2902   // Note: the calculated starting heap region may be NULL
  2903   // (when the collection set is empty).
  2904   assert(result == NULL || result->in_collection_set(), "sanity");
  2905   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
  2906          "should be updated only once per pause");
  2907   _worker_cset_start_region[worker_i] = result;
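         // The storestore barrier below keeps these two stores ordered: the
         // cached region must become visible before the time stamp that marks
         // it valid, so a worker that sees a current time stamp for
         // worker_i - 1 (see above) also sees the matching starting region.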
  2908   OrderAccess::storestore();
  2909   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
  2910   return result;
  2913 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
  2914                                                      uint no_of_par_workers) {
  2915   uint worker_num =
  2916            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
  2917   assert(UseDynamicNumberOfGCThreads ||
  2918          no_of_par_workers == workers()->total_workers(),
  2919          "Non dynamic should use fixed number of workers");
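         // For example (illustrative values): with 100 regions and 4 parallel
         // workers, workers 0..3 start at regions 0, 25, 50 and 75.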
  2920   const uint start_index = n_regions() * worker_i / worker_num;
  2921   return region_at(start_index);
  2924 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2925   HeapRegion* r = g1_policy()->collection_set();
  2926   while (r != NULL) {
  2927     HeapRegion* next = r->next_in_collection_set();
  2928     if (cl->doHeapRegion(r)) {
  2929       cl->incomplete();
  2930       return;
  2932     r = next;
  2936 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2937                                                   HeapRegionClosure *cl) {
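         // Iterate over the CSet starting at r: first from r to the end of
         // the list, then wrapping around from the head back to r. Note that
         // the early-exit tests below are guarded by "&& false", so a
         // closure's request to abort the iteration is effectively ignored.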
  2938   if (r == NULL) {
  2939     // The CSet is empty so there's nothing to do.
  2940     return;
  2943   assert(r->in_collection_set(),
  2944          "Start region must be a member of the collection set.");
  2945   HeapRegion* cur = r;
  2946   while (cur != NULL) {
  2947     HeapRegion* next = cur->next_in_collection_set();
  2948     if (cl->doHeapRegion(cur) && false) {
  2949       cl->incomplete();
  2950       return;
  2952     cur = next;
  2954   cur = g1_policy()->collection_set();
  2955   while (cur != r) {
  2956     HeapRegion* next = cur->next_in_collection_set();
  2957     if (cl->doHeapRegion(cur) && false) {
  2958       cl->incomplete();
  2959       return;
  2961     cur = next;
  2965 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2966   return n_regions() > 0 ? region_at(0) : NULL;
  2970 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2971   Space* res = heap_region_containing(addr);
  2972   if (res == NULL)
  2973     res = perm_gen()->space_containing(addr);
  2974   return res;
  2977 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2978   Space* sp = space_containing(addr);
  2979   if (sp != NULL) {
  2980     return sp->block_start(addr);
  2982   return NULL;
  2985 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2986   Space* sp = space_containing(addr);
  2987   assert(sp != NULL, "block_size of address outside of heap");
  2988   return sp->block_size(addr);
  2991 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2992   Space* sp = space_containing(addr);
  2993   return sp->block_is_obj(addr);
  2996 bool G1CollectedHeap::supports_tlab_allocation() const {
  2997   return true;
  3000 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  3001   return HeapRegion::GrainBytes;
  3004 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  3005   // Return the remaining space in the cur alloc region, but not less than
  3006   // the min TLAB size.
  3008   // Also, this value can be at most the humongous object threshold,
  3009   // since we can't allow TLABs to grow big enough to accommodate
  3010   // humongous objects.
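         // For example (illustrative, assuming a 1M region size and the usual
         // half-region humongous threshold): max_tlab_size below would be 512K.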
  3012   HeapRegion* hr = _mutator_alloc_region.get();
  3013   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  3014   if (hr == NULL) {
  3015     return max_tlab_size;
  3016   } else {
  3017     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
  3021 size_t G1CollectedHeap::max_capacity() const {
  3022   return _g1_reserved.byte_size();
  3025 jlong G1CollectedHeap::millis_since_last_gc() {
  3026   // assert(false, "NYI");
  3027   return 0;
  3030 void G1CollectedHeap::prepare_for_verify() {
  3031   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  3032     ensure_parsability(false);
  3034   g1_rem_set()->prepare_for_verify();
  3037 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
  3038                                               VerifyOption vo) {
  3039   switch (vo) {
  3040   case VerifyOption_G1UsePrevMarking:
  3041     return hr->obj_allocated_since_prev_marking(obj);
  3042   case VerifyOption_G1UseNextMarking:
  3043     return hr->obj_allocated_since_next_marking(obj);
  3044   case VerifyOption_G1UseMarkWord:
  3045     return false;
  3046   default:
  3047     ShouldNotReachHere();
  3049   return false; // keep some compilers happy
  3052 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
  3053   switch (vo) {
  3054   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
  3055   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
  3056   case VerifyOption_G1UseMarkWord:    return NULL;
  3057   default:                            ShouldNotReachHere();
  3059   return NULL; // keep some compilers happy
  3062 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
  3063   switch (vo) {
  3064   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
  3065   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
  3066   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
  3067   default:                            ShouldNotReachHere();
  3069   return false; // keep some compilers happy
  3072 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
  3073   switch (vo) {
  3074   case VerifyOption_G1UsePrevMarking: return "PTAMS";
  3075   case VerifyOption_G1UseNextMarking: return "NTAMS";
  3076   case VerifyOption_G1UseMarkWord:    return "NONE";
  3077   default:                            ShouldNotReachHere();
  3079   return NULL; // keep some compilers happy
  3082 class VerifyLivenessOopClosure: public OopClosure {
  3083   G1CollectedHeap* _g1h;
  3084   VerifyOption _vo;
  3085 public:
  3086   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
  3087     _g1h(g1h), _vo(vo)
  3088   { }
  3089   void do_oop(narrowOop *p) { do_oop_work(p); }
  3090   void do_oop(      oop *p) { do_oop_work(p); }
  3092   template <class T> void do_oop_work(T *p) {
  3093     oop obj = oopDesc::load_decode_heap_oop(p);
  3094     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
  3095               "Dead object referenced by a not dead object");
  3097 };
  3099 class VerifyObjsInRegionClosure: public ObjectClosure {
  3100 private:
  3101   G1CollectedHeap* _g1h;
  3102   size_t _live_bytes;
  3103   HeapRegion *_hr;
  3104   VerifyOption _vo;
  3105 public:
  3106   // _vo == UsePrevMarking -> use "prev" marking information,
  3107   // _vo == UseNextMarking -> use "next" marking information,
  3108   // _vo == UseMarkWord    -> use mark word from object header.
  3109   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
  3110     : _live_bytes(0), _hr(hr), _vo(vo) {
  3111     _g1h = G1CollectedHeap::heap();
  3113   void do_object(oop o) {
  3114     VerifyLivenessOopClosure isLive(_g1h, _vo);
  3115     assert(o != NULL, "Huh?");
  3116     if (!_g1h->is_obj_dead_cond(o, _vo)) {
  3117       // If the object is alive according to the mark word,
  3118       // then verify that the marking information agrees.
  3119       // Note we can't verify the contra-positive of the
  3120       // above: if the object is dead (according to the mark
  3121       // word), it may not be marked, or may have been marked
  3122       // but has since become dead, or may have been allocated
  3123       // since the last marking.
  3124       if (_vo == VerifyOption_G1UseMarkWord) {
  3125         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
  3128       o->oop_iterate(&isLive);
  3129       if (!_hr->obj_allocated_since_prev_marking(o)) {
  3130         size_t obj_size = o->size();    // Make sure we don't overflow
  3131         _live_bytes += (obj_size * HeapWordSize);
  3135   size_t live_bytes() { return _live_bytes; }
  3136 };
  3138 class PrintObjsInRegionClosure : public ObjectClosure {
  3139   HeapRegion *_hr;
  3140   G1CollectedHeap *_g1;
  3141 public:
  3142   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  3143     _g1 = G1CollectedHeap::heap();
  3144   }
  3146   void do_object(oop o) {
  3147     if (o != NULL) {
  3148       HeapWord *start = (HeapWord *) o;
  3149       size_t word_sz = o->size();
  3150       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  3151                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  3152                           (void*) o, word_sz,
  3153                           _g1->isMarkedPrev(o),
  3154                           _g1->isMarkedNext(o),
  3155                           _hr->obj_allocated_since_prev_marking(o));
  3156       HeapWord *end = start + word_sz;
  3157       HeapWord *cur;
  3158       int *val;
  3159       for (cur = start; cur < end; cur++) {
  3160         val = (int *) cur;
  3161         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  3165 };
  3167 class VerifyRegionClosure: public HeapRegionClosure {
  3168 private:
  3169   bool             _par;
  3170   VerifyOption     _vo;
  3171   bool             _failures;
  3172 public:
  3173   // _vo == UsePrevMarking -> use "prev" marking information,
  3174   // _vo == UseNextMarking -> use "next" marking information,
  3175   // _vo == UseMarkWord    -> use mark word from object header.
  3176   VerifyRegionClosure(bool par, VerifyOption vo)
  3177     : _par(par),
  3178       _vo(vo),
  3179       _failures(false) {}
  3181   bool failures() {
  3182     return _failures;
  3185   bool doHeapRegion(HeapRegion* r) {
  3186     if (!r->continuesHumongous()) {
  3187       bool failures = false;
  3188       r->verify(_vo, &failures);
  3189       if (failures) {
  3190         _failures = true;
  3191       } else {
  3192         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
  3193         r->object_iterate(&not_dead_yet_cl);
  3194         if (_vo != VerifyOption_G1UseNextMarking) {
  3195           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  3196             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  3197                                    "max_live_bytes "SIZE_FORMAT" "
  3198                                    "< calculated "SIZE_FORMAT,
  3199                                    r->bottom(), r->end(),
  3200                                    r->max_live_bytes(),
  3201                                  not_dead_yet_cl.live_bytes());
  3202             _failures = true;
  3204         } else {
  3205           // When vo == UseNextMarking we cannot currently do a sanity
  3206           // check on the live bytes as the calculation has not been
  3207           // finalized yet.
  3211     return false; // do not abort the iteration here; the caller checks failures()
  3213 };
  3215 class VerifyRootsClosure: public OopsInGenClosure {
  3216 private:
  3217   G1CollectedHeap* _g1h;
  3218   VerifyOption     _vo;
  3219   bool             _failures;
  3220 public:
  3221   // _vo == UsePrevMarking -> use "prev" marking information,
  3222   // _vo == UseNextMarking -> use "next" marking information,
  3223   // _vo == UseMarkWord    -> use mark word from object header.
  3224   VerifyRootsClosure(VerifyOption vo) :
  3225     _g1h(G1CollectedHeap::heap()),
  3226     _vo(vo),
  3227     _failures(false) { }
  3229   bool failures() { return _failures; }
  3231   template <class T> void do_oop_nv(T* p) {
  3232     T heap_oop = oopDesc::load_heap_oop(p);
  3233     if (!oopDesc::is_null(heap_oop)) {
  3234       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  3235       if (_g1h->is_obj_dead_cond(obj, _vo)) {
  3236         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  3237                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  3238         if (_vo == VerifyOption_G1UseMarkWord) {
  3239           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
  3241         obj->print_on(gclog_or_tty);
  3242         _failures = true;
  3247   void do_oop(oop* p)       { do_oop_nv(p); }
  3248   void do_oop(narrowOop* p) { do_oop_nv(p); }
  3249 };
  3251 // This is the task used for parallel heap verification.
  3253 class G1ParVerifyTask: public AbstractGangTask {
  3254 private:
  3255   G1CollectedHeap* _g1h;
  3256   VerifyOption     _vo;
  3257   bool             _failures;
  3259 public:
  3260   // _vo == UsePrevMarking -> use "prev" marking information,
  3261   // _vo == UseNextMarking -> use "next" marking information,
  3262   // _vo == UseMarkWord    -> use mark word from object header.
  3263   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
  3264     AbstractGangTask("Parallel verify task"),
  3265     _g1h(g1h),
  3266     _vo(vo),
  3267     _failures(false) { }
  3269   bool failures() {
  3270     return _failures;
  3273   void work(uint worker_id) {
  3274     HandleMark hm;
  3275     VerifyRegionClosure blk(true, _vo);
  3276     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
  3277                                           _g1h->workers()->active_workers(),
  3278                                           HeapRegion::ParVerifyClaimValue);
  3279     if (blk.failures()) {
  3280       _failures = true;
  3283 };
  3285 void G1CollectedHeap::verify(bool silent) {
  3286   verify(silent, VerifyOption_G1UsePrevMarking);
  3289 void G1CollectedHeap::verify(bool silent,
  3290                              VerifyOption vo) {
  3291   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  3292     if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
  3293     VerifyRootsClosure rootsCl(vo);
  3295     assert(Thread::current()->is_VM_thread(),
  3296       "Expected to be executed serially by the VM thread at this point");
  3298     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
  3300     // We apply the relevant closures to all the oops in the
  3301     // system dictionary, the string table and the code cache.
  3302     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
  3304     process_strong_roots(true,      // activate StrongRootsScope
  3305                          true,      // we set "collecting perm gen" to true,
  3306                                     // so we don't reset the dirty cards in the perm gen.
  3307                          ScanningOption(so),  // roots scanning options
  3308                          &rootsCl,
  3309                          &blobsCl,
  3310                          &rootsCl);
  3312     // If we're verifying after the marking phase of a Full GC then we can't
  3313     // treat the perm gen as roots into the G1 heap. Some of the objects in
  3314     // the perm gen may be dead and hence not marked. If one of these dead
  3315     // objects is considered to be a root then we may end up with a false
  3316     // "Root location <x> points to dead obj <y>" failure.
  3317     if (vo != VerifyOption_G1UseMarkWord) {
  3318       // Since we used "collecting_perm_gen" == true above, we will not have
  3319       // checked the refs from perm into the G1-collected heap. We check those
  3320       // references explicitly below. Whether the relevant cards are dirty
  3321       // is checked further below in the rem set verification.
  3322       if (!silent) { gclog_or_tty->print("Permgen roots "); }
  3323       perm_gen()->oop_iterate(&rootsCl);
  3325     bool failures = rootsCl.failures();
  3327     if (vo != VerifyOption_G1UseMarkWord) {
  3328       // If we're verifying during a full GC then the region sets
  3329       // will have been torn down at the start of the GC. Therefore
  3330       // verifying the region sets will fail. So we only verify
  3331       // the region sets when not in a full GC.
  3332       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
  3333       verify_region_sets();
  3336     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  3337     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  3338       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3339              "sanity check");
  3341       G1ParVerifyTask task(this, vo);
  3342       assert(UseDynamicNumberOfGCThreads ||
  3343         workers()->active_workers() == workers()->total_workers(),
  3344         "If not dynamic should be using all the workers");
  3345       int n_workers = workers()->active_workers();
  3346       set_par_threads(n_workers);
  3347       workers()->run_task(&task);
  3348       set_par_threads(0);
  3349       if (task.failures()) {
  3350         failures = true;
  3353       // Checks that the expected amount of parallel work was done.
  3354       // The implication is that n_workers is > 0.
  3355       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  3356              "sanity check");
  3358       reset_heap_region_claim_values();
  3360       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3361              "sanity check");
  3362     } else {
  3363       VerifyRegionClosure blk(false, vo);
  3364       heap_region_iterate(&blk);
  3365       if (blk.failures()) {
  3366         failures = true;
  3369     if (!silent) gclog_or_tty->print("RemSet ");
  3370     rem_set()->verify();
  3372     if (failures) {
  3373       gclog_or_tty->print_cr("Heap:");
  3374       // It helps to have the per-region information in the output to
  3375       // help us track down what went wrong. This is why we call
  3376       // print_extended_on() instead of print_on().
  3377       print_extended_on(gclog_or_tty);
  3378       gclog_or_tty->print_cr("");
  3379 #ifndef PRODUCT
  3380       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  3381         concurrent_mark()->print_reachable("at-verification-failure",
  3382                                            vo, false /* all */);
  3384 #endif
  3385       gclog_or_tty->flush();
  3387     guarantee(!failures, "there should not have been any failures");
  3388   } else {
  3389     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  3393 class PrintRegionClosure: public HeapRegionClosure {
  3394   outputStream* _st;
  3395 public:
  3396   PrintRegionClosure(outputStream* st) : _st(st) {}
  3397   bool doHeapRegion(HeapRegion* r) {
  3398     r->print_on(_st);
  3399     return false;
  3401 };
  3403 void G1CollectedHeap::print_on(outputStream* st) const {
  3404   st->print(" %-20s", "garbage-first heap");
  3405   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  3406             capacity()/K, used_unlocked()/K);
  3407   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  3408             _g1_storage.low_boundary(),
  3409             _g1_storage.high(),
  3410             _g1_storage.high_boundary());
  3411   st->cr();
  3412   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  3413   uint young_regions = _young_list->length();
  3414   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
  3415             (size_t) young_regions * HeapRegion::GrainBytes / K);
  3416   uint survivor_regions = g1_policy()->recorded_survivor_regions();
  3417   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
  3418             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  3419   st->cr();
  3420   perm()->as_gen()->print_on(st);
  3423 void G1CollectedHeap::print_extended_on(outputStream* st) const {
  3424   print_on(st);
  3426   // Print the per-region information.
  3427   st->cr();
  3428   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
  3429                "HS=humongous(starts), HC=humongous(continues), "
  3430                "CS=collection set, F=free, TS=gc time stamp, "
  3431                "PTAMS=previous top-at-mark-start, "
  3432                "NTAMS=next top-at-mark-start)");
  3433   PrintRegionClosure blk(st);
  3434   heap_region_iterate(&blk);
  3437 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3438   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3439     workers()->print_worker_threads_on(st);
  3441   _cmThread->print_on(st);
  3442   st->cr();
  3443   _cm->print_worker_threads_on(st);
  3444   _cg1r->print_worker_threads_on(st);
  3445   st->cr();
  3448 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3449   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3450     workers()->threads_do(tc);
  3452   tc->do_thread(_cmThread);
  3453   _cg1r->threads_do(tc);
  3456 void G1CollectedHeap::print_tracing_info() const {
  3457   // We'll overload this to mean "trace GC pause statistics."
  3458   if (TraceGen0Time || TraceGen1Time) {
  3459     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  3460     // to that.
  3461     g1_policy()->print_tracing_info();
  3463   if (G1SummarizeRSetStats) {
  3464     g1_rem_set()->print_summary_info();
  3466   if (G1SummarizeConcMark) {
  3467     concurrent_mark()->print_summary_info();
  3469   g1_policy()->print_yg_surv_rate_info();
  3470   SpecializationStats::print();
  3473 #ifndef PRODUCT
  3474 // Helpful for debugging RSet issues.
  3476 class PrintRSetsClosure : public HeapRegionClosure {
  3477 private:
  3478   const char* _msg;
  3479   size_t _occupied_sum;
  3481 public:
  3482   bool doHeapRegion(HeapRegion* r) {
  3483     HeapRegionRemSet* hrrs = r->rem_set();
  3484     size_t occupied = hrrs->occupied();
  3485     _occupied_sum += occupied;
  3487     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
  3488                            HR_FORMAT_PARAMS(r));
  3489     if (occupied == 0) {
  3490       gclog_or_tty->print_cr("  RSet is empty");
  3491     } else {
  3492       hrrs->print();
  3494     gclog_or_tty->print_cr("----------");
  3495     return false;
  3498   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
  3499     gclog_or_tty->cr();
  3500     gclog_or_tty->print_cr("========================================");
  3501     gclog_or_tty->print_cr(msg);
  3502     gclog_or_tty->cr();
  3505   ~PrintRSetsClosure() {
  3506     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
  3507     gclog_or_tty->print_cr("========================================");
  3508     gclog_or_tty->cr();
  3510 };
  3512 void G1CollectedHeap::print_cset_rsets() {
  3513   PrintRSetsClosure cl("Printing CSet RSets");
  3514   collection_set_iterate(&cl);
  3517 void G1CollectedHeap::print_all_rsets() {
  3518   PrintRSetsClosure cl("Printing All RSets");
  3519   heap_region_iterate(&cl);
  3521 #endif // PRODUCT
  3523 G1CollectedHeap* G1CollectedHeap::heap() {
  3524   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  3525          "not a garbage-first heap");
  3526   return _g1h;
  3529 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  3530   // always_do_update_barrier = false;
  3531   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  3532   // Call allocation profiler
  3533   AllocationProfiler::iterate_since_last_gc();
  3534   // Fill TLAB's and such
  3535   ensure_parsability(true);
  3538 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  3539   // FIXME: what is this about?
  3540   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  3541   // is set.
  3542   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  3543                         "derived pointer present"));
  3544   // always_do_update_barrier = true;
  3546   // We have just completed a GC. Update the soft reference
  3547   // policy with the new heap occupancy
  3548   Universe::update_heap_info_at_gc();
  3551 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  3552                                                unsigned int gc_count_before,
  3553                                                bool* succeeded) {
  3554   assert_heap_not_locked_and_not_at_safepoint();
  3555   g1_policy()->record_stop_world_start();
  3556   VM_G1IncCollectionPause op(gc_count_before,
  3557                              word_size,
  3558                              false, /* should_initiate_conc_mark */
  3559                              g1_policy()->max_pause_time_ms(),
  3560                              GCCause::_g1_inc_collection_pause);
  3561   VMThread::execute(&op);
  3563   HeapWord* result = op.result();
  3564   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  3565   assert(result == NULL || ret_succeeded,
  3566          "the result should be NULL if the VM did not succeed");
  3567   *succeeded = ret_succeeded;
  3569   assert_heap_not_locked();
  3570   return result;
  3573 void
  3574 G1CollectedHeap::doConcurrentMark() {
  3575   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  3576   if (!_cmThread->in_progress()) {
  3577     _cmThread->set_started();
  3578     CGC_lock->notify();
  3582 size_t G1CollectedHeap::pending_card_num() {
  3583   size_t extra_cards = 0;
  3584   JavaThread *curr = Threads::first();
  3585   while (curr != NULL) {
  3586     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3587     extra_cards += dcq.size();
  3588     curr = curr->next();
  3590   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3591   size_t buffer_size = dcqs.buffer_size();
  3592   size_t buffer_num = dcqs.completed_buffers_num();
  3594   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  3595   // in bytes - not the number of 'entries'. We need to convert
  3596   // into a number of cards.
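         // Illustrative arithmetic (assumed values): with oopSize == 8, ten
         // completed 2048-byte buffers plus 512 bytes still sitting in the
         // per-thread queues give (2048 * 10 + 512) / 8 == 2624 pending cards.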
  3597   return (buffer_size * buffer_num + extra_cards) / oopSize;
  3600 size_t G1CollectedHeap::cards_scanned() {
  3601   return g1_rem_set()->cardsScanned();
  3604 void
  3605 G1CollectedHeap::setup_surviving_young_words() {
  3606   assert(_surviving_young_words == NULL, "pre-condition");
  3607   uint array_length = g1_policy()->young_cset_region_length();
  3608   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  3609   if (_surviving_young_words == NULL) {
  3610     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  3611                           "Not enough space for young surv words summary.");
  3613   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
  3614 #ifdef ASSERT
  3615   for (uint i = 0;  i < array_length; ++i) {
  3616     assert( _surviving_young_words[i] == 0, "memset above" );
  3618 #endif // ASSERT
  3621 void
  3622 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3623   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3624   uint array_length = g1_policy()->young_cset_region_length();
  3625   for (uint i = 0; i < array_length; ++i) {
  3626     _surviving_young_words[i] += surv_young_words[i];
  3630 void
  3631 G1CollectedHeap::cleanup_surviving_young_words() {
  3632   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3633   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
  3634   _surviving_young_words = NULL;
  3637 #ifdef ASSERT
  3638 class VerifyCSetClosure: public HeapRegionClosure {
  3639 public:
  3640   bool doHeapRegion(HeapRegion* hr) {
  3641     // Here we check that the CSet region's RSet is ready for parallel
  3642     // iteration. The fields that we'll verify are only manipulated
  3643     // when the region is part of a CSet and is collected. Afterwards,
  3644     // we reset these fields when we clear the region's RSet (when the
  3645     // region is freed) so they are ready when the region is
  3646     // re-allocated. The only exception to this is if there's an
  3647     // evacuation failure and instead of freeing the region we leave
  3648     // it in the heap. In that case, we reset these fields during
  3649     // evacuation failure handling.
  3650     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
  3652     // Here's a good place to add any other checks we'd like to
  3653     // perform on CSet regions.
  3654     return false;
  3656 };
  3657 #endif // ASSERT
  3659 #if TASKQUEUE_STATS
  3660 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3661   st->print_raw_cr("GC Task Stats");
  3662   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3663   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3666 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3667   print_taskqueue_stats_hdr(st);
  3669   TaskQueueStats totals;
  3670   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3671   for (int i = 0; i < n; ++i) {
  3672     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3673     totals += task_queue(i)->stats;
  3675   st->print_raw("tot "); totals.print(st); st->cr();
  3677   DEBUG_ONLY(totals.verify());
  3680 void G1CollectedHeap::reset_taskqueue_stats() {
  3681   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3682   for (int i = 0; i < n; ++i) {
  3683     task_queue(i)->stats.reset();
  3686 #endif // TASKQUEUE_STATS
  3688 bool
  3689 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3690   assert_at_safepoint(true /* should_be_vm_thread */);
  3691   guarantee(!is_gc_active(), "collection is not reentrant");
  3693   if (GC_locker::check_active_before_gc()) {
  3694     return false;
  3697   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3698   ResourceMark rm;
  3700   print_heap_before_gc();
  3702   HRSPhaseSetter x(HRSPhaseEvacuation);
  3703   verify_region_sets_optional();
  3704   verify_dirty_young_regions();
  3706   // This call will decide whether this pause is an initial-mark
  3707   // pause. If it is, during_initial_mark_pause() will return true
  3708   // for the duration of this pause.
  3709   g1_policy()->decide_on_conc_mark_initiation();
  3711   // We do not allow initial-mark to be piggy-backed on a mixed GC.
  3712   assert(!g1_policy()->during_initial_mark_pause() ||
  3713           g1_policy()->gcs_are_young(), "sanity");
  3715   // We also do not allow mixed GCs during marking.
  3716   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
  3718   // Record whether this pause is an initial mark. When the current
  3719   // thread has completed its logging output and it's safe to signal
  3720   // the CM thread, the flag's value in the policy has been reset.
  3721   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
  3723   // Inner scope for scope based logging, timers, and stats collection
  3725     if (g1_policy()->during_initial_mark_pause()) {
  3726       // We are about to start a marking cycle, so we increment the
  3727       // full collection counter.
  3728       increment_old_marking_cycles_started();
  3730     // If the "finer" log level is on, we'll print long statistics information
  3731     // in the collector policy code, so let's not print it here as the output
  3732     // is messy if we do.
  3733     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  3734     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  3736     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  3737                                 workers()->active_workers() : 1);
  3738     g1_policy()->phase_times()->note_gc_start(os::elapsedTime(), active_workers,
  3739       g1_policy()->gcs_are_young(), g1_policy()->during_initial_mark_pause(), gc_cause());
  3741     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  3742     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  3744     // If the secondary_free_list is not empty, append it to the
  3745     // free_list. No need to wait for the cleanup operation to finish;
  3746     // the region allocation code will check the secondary_free_list
  3747     // and wait if necessary. If the G1StressConcRegionFreeing flag is
  3748     // set, skip this step so that the region allocation code has to
  3749     // get entries from the secondary_free_list.
  3750     if (!G1StressConcRegionFreeing) {
  3751       append_secondary_free_list_if_not_empty_with_lock();
  3754     assert(check_young_list_well_formed(),
  3755       "young list should be well formed");
  3757     // Don't dynamically change the number of GC threads this early.  A value of
  3758     // 0 is used to indicate serial work.  When parallel work is done,
  3759     // it will be set.
  3761     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3762       IsGCActiveMark x;
  3764       gc_prologue(false);
  3765       increment_total_collections(false /* full gc */);
  3766       increment_gc_time_stamp();
  3768       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  3769         HandleMark hm;  // Discard invalid handles created during verification
  3770         gclog_or_tty->print(" VerifyBeforeGC:");
  3771         prepare_for_verify();
  3772         Universe::verify(/* silent      */ false,
  3773                          /* option      */ VerifyOption_G1UsePrevMarking);
  3776       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3778       // Please see comment in g1CollectedHeap.hpp and
  3779       // G1CollectedHeap::ref_processing_init() to see how
  3780       // reference processing currently works in G1.
  3782       // Enable discovery in the STW reference processor
  3783       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
  3784                                             true /*verify_no_refs*/);
  3787         // We want to temporarily turn off discovery by the
  3788         // CM ref processor, if necessary, and turn it back on
  3789         // again later if we do. Using a scoped
  3790         // NoRefDiscovery object will do this.
  3791         NoRefDiscovery no_cm_discovery(ref_processor_cm());
  3793         // Forget the current alloc region (we might even choose it to be part
  3794         // of the collection set!).
  3795         release_mutator_alloc_region();
  3797         // We should call this after we retire the mutator alloc
  3798         // region(s) so that all the ALLOC / RETIRE events are generated
  3799         // before the start GC event.
  3800         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
  3802         // This timing is only used by the ergonomics to handle our pause target.
  3803         // It is unclear why this should not include the full pause. We will
  3804         // investigate this in CR 7178365.
  3805         //
  3806         // Preserving the old comment here if that helps the investigation:
  3807         //
  3808         // The elapsed time induced by the start time below deliberately elides
  3809         // the possible verification above.
  3810         double sample_start_time_sec = os::elapsedTime();
  3811         size_t start_used_bytes = used();
  3813 #if YOUNG_LIST_VERBOSE
  3814         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  3815         _young_list->print();
  3816         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3817 #endif // YOUNG_LIST_VERBOSE
  3819         g1_policy()->record_collection_pause_start(sample_start_time_sec,
  3820                                                    start_used_bytes);
  3822         double scan_wait_start = os::elapsedTime();
  3823         // We have to wait until the CM threads finish scanning the
  3824         // root regions as it's the only way to ensure that all the
  3825         // objects on them have been correctly scanned before we start
  3826         // moving them during the GC.
  3827         bool waited = _cm->root_regions()->wait_until_scan_finished();
  3828         double wait_time_ms = 0.0;
  3829         if (waited) {
  3830           double scan_wait_end = os::elapsedTime();
  3831           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  3833         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
  3835 #if YOUNG_LIST_VERBOSE
  3836         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  3837         _young_list->print();
  3838 #endif // YOUNG_LIST_VERBOSE
  3840         if (g1_policy()->during_initial_mark_pause()) {
  3841           concurrent_mark()->checkpointRootsInitialPre();
  3843         perm_gen()->save_marks();
  3845 #if YOUNG_LIST_VERBOSE
  3846         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  3847         _young_list->print();
  3848         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3849 #endif // YOUNG_LIST_VERBOSE
  3851         g1_policy()->finalize_cset(target_pause_time_ms);
  3853         _cm->note_start_of_gc();
  3854         // We should not verify the per-thread SATB buffers given that
  3855         // we have not filtered them yet (we'll do so during the
  3856         // GC). We also call this after finalize_cset() to
  3857         // ensure that the CSet has been finalized.
  3858         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  3859                                  true  /* verify_enqueued_buffers */,
  3860                                  false /* verify_thread_buffers */,
  3861                                  true  /* verify_fingers */);
  3863         if (_hr_printer.is_active()) {
  3864           HeapRegion* hr = g1_policy()->collection_set();
  3865           while (hr != NULL) {
  3866             G1HRPrinter::RegionType type;
  3867             if (!hr->is_young()) {
  3868               type = G1HRPrinter::Old;
  3869             } else if (hr->is_survivor()) {
  3870               type = G1HRPrinter::Survivor;
  3871             } else {
  3872               type = G1HRPrinter::Eden;
  3874             _hr_printer.cset(hr);
  3875             hr = hr->next_in_collection_set();
  3879 #ifdef ASSERT
  3880         VerifyCSetClosure cl;
  3881         collection_set_iterate(&cl);
  3882 #endif // ASSERT
  3884         setup_surviving_young_words();
  3886         // Initialize the GC alloc regions.
  3887         init_gc_alloc_regions();
  3889         // Actually do the work...
  3890         evacuate_collection_set();
  3892         // We do this to mainly verify the per-thread SATB buffers
  3893         // (which have been filtered by now) since we didn't verify
  3894         // them earlier. No point in re-checking the stacks / enqueued
  3895         // buffers given that the CSet has not changed since last time
  3896         // we checked.
  3897         _cm->verify_no_cset_oops(false /* verify_stacks */,
  3898                                  false /* verify_enqueued_buffers */,
  3899                                  true  /* verify_thread_buffers */,
  3900                                  true  /* verify_fingers */);
  3902         free_collection_set(g1_policy()->collection_set());
  3903         g1_policy()->clear_collection_set();
  3905         cleanup_surviving_young_words();
  3907         // Start a new incremental collection set for the next pause.
  3908         g1_policy()->start_incremental_cset_building();
  3910         // Clear the _cset_fast_test bitmap in anticipation of adding
  3911         // regions to the incremental collection set for the next
  3912         // evacuation pause.
  3913         clear_cset_fast_test();
  3915         _young_list->reset_sampled_info();
  3917         // Don't check the whole heap at this point as the
  3918         // GC alloc regions from this pause have been tagged
  3919         // as survivors and moved on to the survivor list.
  3920         // Survivor regions will fail the !is_young() check.
  3921         assert(check_young_list_empty(false /* check_heap */),
  3922           "young list should be empty");
  3924 #if YOUNG_LIST_VERBOSE
  3925         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  3926         _young_list->print();
  3927 #endif // YOUNG_LIST_VERBOSE
  3929         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  3930                                             _young_list->first_survivor_region(),
  3931                                             _young_list->last_survivor_region());
  3933         _young_list->reset_auxilary_lists();
  3935         if (evacuation_failed()) {
  3936           _summary_bytes_used = recalculate_used();
  3937         } else {
  3938           // The "used" of the collection set regions has already been subtracted
  3939           // when they were freed.  Add in the bytes evacuated.
  3940           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
  3943         if (g1_policy()->during_initial_mark_pause()) {
  3944           // We have to do this before we notify the CM threads that
  3945           // they can start working to make sure that all the
  3946           // appropriate initialization is done on the CM object.
  3947           concurrent_mark()->checkpointRootsInitialPost();
  3948           set_marking_started();
  3949           // Note that we don't actually trigger the CM thread at
  3950           // this point. We do that later when we're sure that
  3951           // the current thread has completed its logging output.
  3954         allocate_dummy_regions();
  3956 #if YOUNG_LIST_VERBOSE
  3957         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  3958         _young_list->print();
  3959         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3960 #endif // YOUNG_LIST_VERBOSE
  3962         init_mutator_alloc_region();
  3965           size_t expand_bytes = g1_policy()->expansion_amount();
  3966           if (expand_bytes > 0) {
  3967             size_t bytes_before = capacity();
  3968             // No need for an ergo verbose message here,
  3969             // expansion_amount() does this when it returns a value > 0.
  3970             if (!expand(expand_bytes)) {
  3971               // We failed to expand the heap so let's verify that
  3972               // committed/uncommitted amount match the backing store
  3973               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  3974               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  3979         // We redo the verification, but now with respect to the new CSet,
  3980         // which has just been initialized after the previous CSet was freed.
  3981         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  3982                                  true  /* verify_enqueued_buffers */,
  3983                                  true  /* verify_thread_buffers */,
  3984                                  true  /* verify_fingers */);
  3985         _cm->note_end_of_gc();
  3987         // Collect thread local data to allow the ergonomics to use
  3988         // the collected information
  3989         g1_policy()->phase_times()->collapse_par_times();
  3991         // This timing is only used by the ergonomics to handle our pause target.
  3992         // It is unclear why this should not include the full pause. We will
  3993         // investigate this in CR 7178365.
  3994         double sample_end_time_sec = os::elapsedTime();
  3995         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
  3996         g1_policy()->record_collection_pause_end(pause_time_ms);
  3998         MemoryService::track_memory_usage();
  4000         // In prepare_for_verify() below we'll need to scan the deferred
  4001         // update buffers to bring the RSets up-to-date if
  4002         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  4003         // the update buffers we'll probably need to scan cards on the
  4004         // regions we just allocated to (i.e., the GC alloc
  4005         // regions). However, during the last GC we called
  4006         // set_saved_mark() on all the GC alloc regions, so card
  4007         // scanning might skip the [saved_mark_word()...top()] area of
  4008         // those regions (i.e., the area we allocated objects into
  4009         // during the last GC). But it shouldn't. Given that
  4010         // saved_mark_word() is conditional on whether the GC time stamp
  4011         // on the region is current or not, by incrementing the GC time
  4012         // stamp here we invalidate all the GC time stamps on all the
  4013         // regions and saved_mark_word() will simply return top() for
  4014         // all the regions. This is a nicer way of ensuring this rather
  4015         // than iterating over the regions and fixing them. In fact, the
  4016         // GC time stamp increment here also ensures that
  4017         // saved_mark_word() will return top() between pauses, i.e.,
  4018         // during concurrent refinement. So we don't need the
  4019         // is_gc_active() check to decide which top to use when
  4020         // scanning cards (see CR 7039627).
  4021         increment_gc_time_stamp();
  4023         if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  4024           HandleMark hm;  // Discard invalid handles created during verification
  4025           gclog_or_tty->print(" VerifyAfterGC:");
  4026           prepare_for_verify();
  4027           Universe::verify(/* silent      */ false,
  4028                            /* option      */ VerifyOption_G1UsePrevMarking);
  4031         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  4032         ref_processor_stw()->verify_no_references_recorded();
  4034         // CM reference discovery will be re-enabled if necessary.
  4037       // We should do this after we potentially expand the heap so
  4038       // that all the COMMIT events are generated before the end GC
  4039       // event, and after we retire the GC alloc regions so that all
  4040       // RETIRE events are generated before the end GC event.
  4041       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
  4043       if (mark_in_progress()) {
  4044         concurrent_mark()->update_g1_committed();
  4047 #ifdef TRACESPINNING
  4048       ParallelTaskTerminator::print_termination_counts();
  4049 #endif
  4051       gc_epilogue(false);
  4053       g1_policy()->phase_times()->note_gc_end(os::elapsedTime());
  4055       // We have to do this after we decide whether to expand the heap or not.
  4056       g1_policy()->print_heap_transition();
  4059     // It is not yet safe to tell the concurrent mark thread to
  4060     // start as we have some optional output below. We don't want the
  4061     // output from the concurrent mark thread interfering with this
  4062     // logging output either.
  4064     _hrs.verify_optional();
  4065     verify_region_sets_optional();
  4067     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  4068     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  4070     print_heap_after_gc();
  4072     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  4073     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  4074     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  4075     // before any GC notifications are raised.
  4076     g1mm()->update_sizes();
  4079   if (G1SummarizeRSetStats &&
  4080       (G1SummarizeRSetStatsPeriod > 0) &&
  4081       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  4082     g1_rem_set()->print_summary_info();
  4085   // It should now be safe to tell the concurrent mark thread to start
  4086   // without its logging output interfering with the logging output
  4087   // that came from the pause.
  4089   if (should_start_conc_mark) {
  4090     // CAUTION: after the doConcurrentMark() call below,
  4091     // the concurrent marking thread(s) could be running
  4092     // concurrently with us. Make sure that anything after
  4093     // this point does not assume that we are the only GC thread
  4094     // running. Note: of course, the actual marking work will
  4095     // not start until the safepoint itself is released in
  4096     // ConcurrentGCThread::safepoint_desynchronize().
  4097     doConcurrentMark();
  4100   return true;
  4103 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  4105   size_t gclab_word_size;
  4106   switch (purpose) {
  4107     case GCAllocForSurvived:
  4108       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
  4109       break;
  4110     case GCAllocForTenured:
  4111       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4112       break;
  4113     default:
  4114       assert(false, "unknown GCAllocPurpose");
  4115       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4116       break;
  4119   // Prevent humongous PLAB sizes for two reasons:
  4120   // * PLABs are allocated using similar paths to oops, but should
  4121   //   never be in a humongous region
  4122   // * Allowing humongous PLABs needlessly churns the region free lists
  4123   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
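         // The cap works because _humongous_object_threshold_in_words marks the
         // humongous-object boundary (half a heap region), so any PLAB at or
         // below it can still be allocated as an ordinary, non-humongous object.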
  4126 void G1CollectedHeap::init_mutator_alloc_region() {
  4127   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  4128   _mutator_alloc_region.init();
  4131 void G1CollectedHeap::release_mutator_alloc_region() {
  4132   _mutator_alloc_region.release();
  4133   assert(_mutator_alloc_region.get() == NULL, "post-condition");
  4136 void G1CollectedHeap::init_gc_alloc_regions() {
  4137   assert_at_safepoint(true /* should_be_vm_thread */);
  4139   _survivor_gc_alloc_region.init();
  4140   _old_gc_alloc_region.init();
  4141   HeapRegion* retained_region = _retained_old_gc_alloc_region;
  4142   _retained_old_gc_alloc_region = NULL;
  4144   // We will discard the current GC alloc region if:
  4145   // a) it's in the collection set (it can happen!),
  4146   // b) it's already full (no point in using it),
  4147   // c) it's empty (this means that it was emptied during
  4148   // a cleanup and it should be on the free list now), or
  4149   // d) it's humongous (this means that it was emptied
  4150   // during a cleanup and was added to the free list, but
  4151   // has been subsequently used to allocate a humongous
  4152   // object that may be less than the region size).
  4153   if (retained_region != NULL &&
  4154       !retained_region->in_collection_set() &&
  4155       !(retained_region->top() == retained_region->end()) &&
  4156       !retained_region->is_empty() &&
  4157       !retained_region->isHumongous()) {
  4158     retained_region->set_saved_mark();
  4159     // The retained region was added to the old region set when it was
  4160     // retired. We have to remove it now, since we don't allow regions
  4161     // we allocate to in the region sets. We'll re-add it later, when
  4162     // it's retired again.
  4163     _old_set.remove(retained_region);
  4164     bool during_im = g1_policy()->during_initial_mark_pause();
  4165     retained_region->note_start_of_copying(during_im);
  4166     _old_gc_alloc_region.set(retained_region);
  4167     _hr_printer.reuse(retained_region);
  4171 void G1CollectedHeap::release_gc_alloc_regions() {
  4172   _survivor_gc_alloc_region.release();
  4173   // If we have an old GC alloc region to release, we'll save it in
  4174   // _retained_old_gc_alloc_region. If we don't,
  4175   // _retained_old_gc_alloc_region will become NULL. This is what we
  4176   // want either way so no reason to check explicitly for either
  4177   // condition.
  4178   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
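         // With ResizePLAB enabled, use the allocation and wastage statistics
         // gathered during this pause to compute the desired PLAB sizes for the
         // next pause (picked up later via desired_plab_sz()).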
  4180   if (ResizePLAB) {
  4181     _survivor_plab_stats.adjust_desired_plab_sz();
  4182     _old_plab_stats.adjust_desired_plab_sz();
  4186 void G1CollectedHeap::abandon_gc_alloc_regions() {
  4187   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
  4188   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
  4189   _retained_old_gc_alloc_region = NULL;
  4192 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  4193   _drain_in_progress = false;
  4194   set_evac_failure_closure(cl);
  4195   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  4198 void G1CollectedHeap::finalize_for_evac_failure() {
  4199   assert(_evac_failure_scan_stack != NULL &&
  4200          _evac_failure_scan_stack->length() == 0,
  4201          "Postcondition");
  4202   assert(!_drain_in_progress, "Postcondition");
  4203   delete _evac_failure_scan_stack;
  4204   _evac_failure_scan_stack = NULL;
  4207 void G1CollectedHeap::remove_self_forwarding_pointers() {
  4208   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4210   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
  4212   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4213     set_par_threads();
  4214     workers()->run_task(&rsfp_task);
  4215     set_par_threads(0);
  4216   } else {
  4217     rsfp_task.work(0);
  4220   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
  4222   // Reset the claim values in the regions in the collection set.
  4223   reset_cset_heap_region_claim_values();
  4225   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4227   // Now restore saved marks, if any.
  4228   if (_objs_with_preserved_marks != NULL) {
  4229     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  4230     guarantee(_objs_with_preserved_marks->length() ==
  4231               _preserved_marks_of_objs->length(), "Both or none.");
  4232     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  4233       oop obj   = _objs_with_preserved_marks->at(i);
  4234       markOop m = _preserved_marks_of_objs->at(i);
  4235       obj->set_mark(m);
  4238     // Delete the preserved marks growable arrays (allocated on the C heap).
  4239     delete _objs_with_preserved_marks;
  4240     delete _preserved_marks_of_objs;
  4241     _objs_with_preserved_marks = NULL;
  4242     _preserved_marks_of_objs = NULL;
  4246 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  4247   _evac_failure_scan_stack->push(obj);
  4250 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  4251   assert(_evac_failure_scan_stack != NULL, "precondition");
  4253   while (_evac_failure_scan_stack->length() > 0) {
  4254      oop obj = _evac_failure_scan_stack->pop();
  4255      _evac_failure_closure->set_region(heap_region_containing(obj));
  4256      obj->oop_iterate_backwards(_evac_failure_closure);
  4260 oop
  4261 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  4262                                                oop old) {
  4263   assert(obj_in_cs(old),
  4264          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
  4265                  (HeapWord*) old));
  4266   markOop m = old->mark();
  4267   oop forward_ptr = old->forward_to_atomic(old);
  4268   if (forward_ptr == NULL) {
  4269     // Forward-to-self succeeded.
  4271     if (_evac_failure_closure != cl) {
  4272       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  4273       assert(!_drain_in_progress,
  4274              "Should only be true while someone holds the lock.");
  4275       // Set the global evac-failure closure to the current thread's.
  4276       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  4277       set_evac_failure_closure(cl);
  4278       // Now do the common part.
  4279       handle_evacuation_failure_common(old, m);
  4280       // Reset to NULL.
  4281       set_evac_failure_closure(NULL);
  4282     } else {
  4283       // The lock is already held, and this is recursive.
  4284       assert(_drain_in_progress, "This should only be the recursive case.");
  4285       handle_evacuation_failure_common(old, m);
  4287     return old;
  4288   } else {
  4289     // Forward-to-self failed. Either someone else managed to allocate
  4290     // space for this object (old != forward_ptr) or they beat us in
  4291     // self-forwarding it (old == forward_ptr).
  4292     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
  4293            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
  4294                    "should not be in the CSet",
  4295                    (HeapWord*) old, (HeapWord*) forward_ptr));
  4296     return forward_ptr;
  4300 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  4301   set_evacuation_failed(true);
  4303   preserve_mark_if_necessary(old, m);
  4305   HeapRegion* r = heap_region_containing(old);
  4306   if (!r->evacuation_failed()) {
  4307     r->set_evacuation_failed(true);
  4308     _hr_printer.evac_failure(r);
  4311   push_on_evac_failure_scan_stack(old);
  4313   if (!_drain_in_progress) {
  4314     // prevent recursion in copy_to_survivor_space()
  4315     _drain_in_progress = true;
  4316     drain_evac_failure_scan_stack();
  4317     _drain_in_progress = false;
  4321 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  4322   assert(evacuation_failed(), "Oversaving!");
  4323   // We want to call the "for_promotion_failure" version only in the
  4324   // case of a promotion failure.
  4325   if (m->must_be_preserved_for_promotion_failure(obj)) {
  4326     if (_objs_with_preserved_marks == NULL) {
  4327       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  4328       _objs_with_preserved_marks =
  4329         new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  4330       _preserved_marks_of_objs =
  4331         new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
  4333     _objs_with_preserved_marks->push(obj);
  4334     _preserved_marks_of_objs->push(m);
  4338 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4339                                                   size_t word_size) {
  4340   if (purpose == GCAllocForSurvived) {
  4341     HeapWord* result = survivor_attempt_allocation(word_size);
  4342     if (result != NULL) {
  4343       return result;
  4344     } else {
  4345       // Let's try to allocate in the old gen in case we can fit the
  4346       // object there.
  4347       return old_attempt_allocation(word_size);
  4349   } else {
  4350     assert(purpose ==  GCAllocForTenured, "sanity");
  4351     HeapWord* result = old_attempt_allocation(word_size);
  4352     if (result != NULL) {
  4353       return result;
  4354     } else {
  4355       // Let's try to allocate in the survivors in case we can fit the
  4356       // object there.
  4357       return survivor_attempt_allocation(word_size);
  4361   ShouldNotReachHere();
  4362   // Trying to keep some compilers happy.
  4363   return NULL;
  4366 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  4367   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
  4369 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
  4370   : _g1h(g1h),
  4371     _refs(g1h->task_queue(queue_num)),
  4372     _dcq(&g1h->dirty_card_queue_set()),
  4373     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  4374     _g1_rem(g1h->g1_rem_set()),
  4375     _hash_seed(17), _queue_num(queue_num),
  4376     _term_attempts(0),
  4377     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  4378     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
  4379     _age_table(false),
  4380     _strong_roots_time(0), _term_time(0),
  4381     _alloc_buffer_waste(0), _undo_waste(0) {
  4382   // we allocate G1YoungSurvRateNumRegions plus one entries, since
  4383   // we "sacrifice" entry 0 to keep track of surviving bytes for
  4384   // non-young regions (where the age is -1)
  4385   // We also add a few elements at the beginning and at the end in
  4386   // an attempt to eliminate cache contention
  4387   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  4388   uint array_length = PADDING_ELEM_NUM +
  4389                       real_length +
  4390                       PADDING_ELEM_NUM;
  4391   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  4392   if (_surviving_young_words_base == NULL)
  4393     vm_exit_out_of_memory(array_length * sizeof(size_t),
  4394                           "Not enough space for young surv histo.");
  4395   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  4396   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
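         // Resulting layout of _surviving_young_words_base:
         //   [ padding | entry 0 (non-young) | entries 1..N (young CSet regions) | padding ]
         // _surviving_young_words points just past the leading padding, so index 0
         // is the non-young bucket and index i (1 <= i <= N) corresponds to the
         // region whose young_index_in_cset() is i - 1.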
  4398   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  4399   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  4401   _start = os::elapsedTime();
  4404 void
  4405 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  4407   st->print_raw_cr("GC Termination Stats");
  4408   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  4409                    " ------waste (KiB)------");
  4410   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  4411                    "  total   alloc    undo");
  4412   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  4413                    " ------- ------- -------");
  4416 void
  4417 G1ParScanThreadState::print_termination_stats(int i,
  4418                                               outputStream* const st) const
  4420   const double elapsed_ms = elapsed_time() * 1000.0;
  4421   const double s_roots_ms = strong_roots_time() * 1000.0;
  4422   const double term_ms    = term_time() * 1000.0;
  4423   st->print_cr("%3d %9.2f %9.2f %6.2f "
  4424                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
  4425                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
  4426                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
  4427                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
  4428                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
  4429                alloc_buffer_waste() * HeapWordSize / K,
  4430                undo_waste() * HeapWordSize / K);
  4433 #ifdef ASSERT
  4434 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  4435   assert(ref != NULL, "invariant");
  4436   assert(UseCompressedOops, "sanity");
  4437   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  4438   oop p = oopDesc::load_decode_heap_oop(ref);
  4439   assert(_g1h->is_in_g1_reserved(p),
  4440          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4441   return true;
  4444 bool G1ParScanThreadState::verify_ref(oop* ref) const {
  4445   assert(ref != NULL, "invariant");
  4446   if (has_partial_array_mask(ref)) {
  4447     // Must be in the collection set--it's already been copied.
  4448     oop p = clear_partial_array_mask(ref);
  4449     assert(_g1h->obj_in_cs(p),
  4450            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4451   } else {
  4452     oop p = oopDesc::load_decode_heap_oop(ref);
  4453     assert(_g1h->is_in_g1_reserved(p),
  4454            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4456   return true;
  4459 bool G1ParScanThreadState::verify_task(StarTask ref) const {
  4460   if (ref.is_narrow()) {
  4461     return verify_ref((narrowOop*) ref);
  4462   } else {
  4463     return verify_ref((oop*) ref);
  4466 #endif // ASSERT
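       // A note on task encoding: entries pushed on the scan queues are either
       // plain oop* / narrowOop* locations, or an oop tagged with the partial
       // array mask (see set_partial_array_mask() / has_partial_array_mask()),
       // which identifies a partially-scanned object array whose remaining
       // chunks still need to be processed.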
  4468 void G1ParScanThreadState::trim_queue() {
  4469   assert(_evac_cl != NULL, "not set");
  4470   assert(_evac_failure_cl != NULL, "not set");
  4471   assert(_partial_scan_cl != NULL, "not set");
  4473   StarTask ref;
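         // Processing an entry can push new entries onto the queue, so keep
         // draining until both the overflow stack and the local queue are empty.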
  4474   do {
  4475     // Drain the overflow stack first, so other threads can steal.
  4476     while (refs()->pop_overflow(ref)) {
  4477       deal_with_reference(ref);
  4480     while (refs()->pop_local(ref)) {
  4481       deal_with_reference(ref);
  4483   } while (!refs()->is_empty());
  4486 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
  4487                                      G1ParScanThreadState* par_scan_state) :
  4488   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  4489   _par_scan_state(par_scan_state),
  4490   _worker_id(par_scan_state->queue_num()),
  4491   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
  4492   _mark_in_progress(_g1->mark_in_progress()) { }
  4494 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4495 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
  4496 #ifdef ASSERT
  4497   HeapRegion* hr = _g1->heap_region_containing(obj);
  4498   assert(hr != NULL, "sanity");
  4499   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  4500 #endif // ASSERT
  4502   // We know that the object is not moving so it's safe to read its size.
  4503   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  4506 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4507 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  4508   ::mark_forwarded_object(oop from_obj, oop to_obj) {
  4509 #ifdef ASSERT
  4510   assert(from_obj->is_forwarded(), "from obj should be forwarded");
  4511   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  4512   assert(from_obj != to_obj, "should not be self-forwarded");
  4514   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
  4515   assert(from_hr != NULL, "sanity");
  4516   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
  4518   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
  4519   assert(to_hr != NULL, "sanity");
  4520   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
  4521 #endif // ASSERT
  4523   // The object might be in the process of being copied by another
  4524   // worker so we cannot trust that its to-space image is
  4525   // well-formed. So we have to read its size from its from-space
  4526   // image which we know should not be changing.
  4527   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
  4530 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4531 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  4532   ::copy_to_survivor_space(oop old) {
  4533   size_t word_sz = old->size();
  4534   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  4535   // +1 to make the -1 indexes valid...
  4536   int       young_index = from_region->young_index_in_cset()+1;
  4537   assert( (from_region->is_young() && young_index >  0) ||
  4538          (!from_region->is_young() && young_index == 0), "invariant" );
  4539   G1CollectorPolicy* g1p = _g1->g1_policy();
  4540   markOop m = old->mark();
  4541   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4542                                            : m->age();
  4543   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4544                                                              word_sz);
  4545   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  4546   oop       obj     = oop(obj_ptr);
  4548   if (obj_ptr == NULL) {
  4549     // This will either forward-to-self, or detect that someone else has
  4550     // installed a forwarding pointer.
  4551     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4552     return _g1->handle_evacuation_failure_par(cl, old);
  4555   // We're going to allocate linearly, so might as well prefetch ahead.
  4556   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
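           // forward_to_atomic() attempts to atomically install the forwarding
           // pointer in old's mark word: a NULL result means this thread won the
           // race and owns the copy; otherwise forward_ptr is whatever the
           // winning thread installed.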
  4558   oop forward_ptr = old->forward_to_atomic(obj);
  4559   if (forward_ptr == NULL) {
  4560     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4561     if (g1p->track_object_age(alloc_purpose)) {
  4562       // We could simply do obj->incr_age(). However, this causes a
  4563       // performance issue. obj->incr_age() will first check whether
  4564       // the object has a displaced mark by checking its mark word;
  4565       // getting the mark word from the new location of the object
  4566       // stalls. So, given that we already have the mark word and we
  4567       // are about to install it anyway, it's better to increase the
  4568       // age on the mark word, when the object does not have a
  4569       // displaced mark word. We're not expecting many objects to have
  4570       // a displaced mark word, so that case is not optimized
  4571       // further (it could be...) and we simply call obj->incr_age().
  4573       if (m->has_displaced_mark_helper()) {
  4574         // in this case, we have to install the mark word first,
  4575         // otherwise obj looks to be forwarded (the old mark word,
  4576         // which contains the forward pointer, was copied)
  4577         obj->set_mark(m);
  4578         obj->incr_age();
  4579       } else {
  4580         m = m->incr_age();
  4581         obj->set_mark(m);
  4583       _par_scan_state->age_table()->add(obj, word_sz);
  4584     } else {
  4585       obj->set_mark(m);
  4588     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  4589     surv_young_words[young_index] += word_sz;
  4591     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4592       // We keep track of the next start index in the length field of
  4593       // the to-space object. The actual length can be found in the
  4594       // length field of the from-space object.
  4595       arrayOop(obj)->set_length(0);
  4596       oop* old_p = set_partial_array_mask(old);
  4597       _par_scan_state->push_on_queue(old_p);
  4598     } else {
  4599       // No point in using the slower heap_region_containing() method,
  4600       // given that we know obj is in the heap.
  4601       _scanner.set_region(_g1->heap_region_containing_raw(obj));
  4602       obj->oop_iterate_backwards(&_scanner);
  4604   } else {
  4605     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4606     obj = forward_ptr;
  4608   return obj;
  4611 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4612 template <class T>
  4613 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  4614 ::do_oop_work(T* p) {
  4615   oop obj = oopDesc::load_decode_heap_oop(p);
  4616   assert(barrier != G1BarrierRS || obj != NULL,
  4617          "Precondition: G1BarrierRS implies obj is non-NULL");
  4619   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  4621   // here the NULL check is implicit in the in_cset_fast_test() test
  4622   if (_g1->in_cset_fast_test(obj)) {
  4623     oop forwardee;
  4624     if (obj->is_forwarded()) {
  4625       forwardee = obj->forwardee();
  4626     } else {
  4627       forwardee = copy_to_survivor_space(obj);
  4629     assert(forwardee != NULL, "forwardee should not be NULL");
  4630     oopDesc::encode_store_heap_oop(p, forwardee);
  4631     if (do_mark_object && forwardee != obj) {
  4632       // If the object is self-forwarded we don't need to explicitly
  4633       // mark it, the evacuation failure protocol will do so.
  4634       mark_forwarded_object(obj, forwardee);
  4637     // When scanning the RS, we only care about objs in CS.
  4638     if (barrier == G1BarrierRS) {
  4639       _par_scan_state->update_rs(_from, p, _worker_id);
  4641   } else {
  4642     // The object is not in collection set. If we're a root scanning
  4643     // closure during an initial mark pause (i.e. do_mark_object will
  4644     // be true) then attempt to mark the object.
  4645     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
  4646       mark_object(obj);
  4650   if (barrier == G1BarrierEvac && obj != NULL) {
  4651     _par_scan_state->update_rs(_from, p, _worker_id);
  4654   if (do_gen_barrier && obj != NULL) {
  4655     par_do_barrier(p);
  4659 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
  4660 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
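       // Explicit instantiations: the template body of do_oop_work() lives in
       // this .cpp file, so the specializations referenced from other
       // translation units have to be instantiated explicitly here.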
  4662 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  4663   assert(has_partial_array_mask(p), "invariant");
  4664   oop from_obj = clear_partial_array_mask(p);
  4666   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  4667   assert(from_obj->is_objArray(), "must be obj array");
  4668   objArrayOop from_obj_array = objArrayOop(from_obj);
  4669   // The from-space object contains the real length.
  4670   int length                 = from_obj_array->length();
  4672   assert(from_obj->is_forwarded(), "must be forwarded");
  4673   oop to_obj                 = from_obj->forwardee();
  4674   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  4675   objArrayOop to_obj_array   = objArrayOop(to_obj);
  4676   // We keep track of the next start index in the length field of the
  4677   // to-space object.
  4678   int next_index             = to_obj_array->length();
  4679   assert(0 <= next_index && next_index < length,
  4680          err_msg("invariant, next index: %d, length: %d", next_index, length));
  4682   int start                  = next_index;
  4683   int end                    = length;
  4684   int remainder              = end - start;
  4685   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  4686   if (remainder > 2 * ParGCArrayScanChunk) {
  4687     end = start + ParGCArrayScanChunk;
  4688     to_obj_array->set_length(end);
  4689     // Push the remainder before we process the range in case another
  4690     // worker has run out of things to do and can steal it.
  4691     oop* from_obj_p = set_partial_array_mask(from_obj);
  4692     _par_scan_state->push_on_queue(from_obj_p);
  4693   } else {
  4694     assert(length == end, "sanity");
  4695     // We'll process the final range for this object. Restore the length
  4696     // so that the heap remains parsable in case of evacuation failure.
  4697     to_obj_array->set_length(end);
  4699   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
  4700   // Process indexes [start,end). It will also process the header
  4701   // along with the first chunk (i.e., the chunk with start == 0).
  4702   // Note that at this point the length field of to_obj_array is not
  4703   // correct given that we are using it to keep track of the next
  4704   // start index. oop_iterate_range() (thankfully!) ignores the length
  4705   // field and only relies on the start / end parameters.  It does
  4706   // however, return the size of the object, which will be incorrect. So
  4707   // we have to ignore it even if we wanted to use it.
  4708   to_obj_array->oop_iterate_range(&_scanner, start, end);
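         // Worked example of the chunking above (assuming ParGCArrayScanChunk
         // is 50): for a 1000-element array the first invocation processes
         // [0, 50) plus the header and pushes the task back with length == 50;
         // subsequent invocations (possibly on stealing workers) each take the
         // next 50-element chunk until the remainder is <= 100, at which point
         // the final range is processed and the real length is restored.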
  4711 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4712 protected:
  4713   G1CollectedHeap*              _g1h;
  4714   G1ParScanThreadState*         _par_scan_state;
  4715   RefToScanQueueSet*            _queues;
  4716   ParallelTaskTerminator*       _terminator;
  4718   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4719   RefToScanQueueSet*      queues()         { return _queues; }
  4720   ParallelTaskTerminator* terminator()     { return _terminator; }
  4722 public:
  4723   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4724                                 G1ParScanThreadState* par_scan_state,
  4725                                 RefToScanQueueSet* queues,
  4726                                 ParallelTaskTerminator* terminator)
  4727     : _g1h(g1h), _par_scan_state(par_scan_state),
  4728       _queues(queues), _terminator(terminator) {}
  4730   void do_void();
  4732 private:
  4733   inline bool offer_termination();
  4734 };
  4736 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4737   G1ParScanThreadState* const pss = par_scan_state();
  4738   pss->start_term_time();
  4739   const bool res = terminator()->offer_termination();
  4740   pss->end_term_time();
  4741   return res;
  4744 void G1ParEvacuateFollowersClosure::do_void() {
  4745   StarTask stolen_task;
  4746   G1ParScanThreadState* const pss = par_scan_state();
  4747   pss->trim_queue();
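           // Once the local queue is drained, repeatedly try to steal work from
           // other workers' queues; termination is only offered (and the loop
           // only exits) when stealing finds nothing left to do.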
  4749   do {
  4750     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  4751       assert(pss->verify_task(stolen_task), "sanity");
  4752       if (stolen_task.is_narrow()) {
  4753         pss->deal_with_reference((narrowOop*) stolen_task);
  4754       } else {
  4755         pss->deal_with_reference((oop*) stolen_task);
  4758       // We've just processed a reference and we might have made
  4759       // available new entries on the queues. So we have to make sure
  4760       // we drain the queues as necessary.
  4761       pss->trim_queue();
  4763   } while (!offer_termination());
  4765   pss->retire_alloc_buffers();
  4768 class G1ParTask : public AbstractGangTask {
  4769 protected:
  4770   G1CollectedHeap*       _g1h;
  4771   RefToScanQueueSet      *_queues;
  4772   ParallelTaskTerminator _terminator;
  4773   uint _n_workers;
  4775   Mutex _stats_lock;
  4776   Mutex* stats_lock() { return &_stats_lock; }
  4778   size_t getNCards() {
  4779     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4780       / G1BlockOffsetSharedArray::N_bytes;
  4783 public:
  4784   G1ParTask(G1CollectedHeap* g1h,
  4785             RefToScanQueueSet *task_queues)
  4786     : AbstractGangTask("G1 collection"),
  4787       _g1h(g1h),
  4788       _queues(task_queues),
  4789       _terminator(0, _queues),
  4790       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  4791   {}
  4793   RefToScanQueueSet* queues() { return _queues; }
  4795   RefToScanQueue *work_queue(int i) {
  4796     return queues()->queue(i);
  4799   ParallelTaskTerminator* terminator() { return &_terminator; }
  4801   virtual void set_for_termination(int active_workers) {
  4802     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
  4803     // in the young space (_par_seq_tasks) in the G1 heap
  4804     // for SequentialSubTasksDone.
  4805     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
  4806     // both of which need setting by set_n_termination().
  4807     _g1h->SharedHeap::set_n_termination(active_workers);
  4808     _g1h->set_n_termination(active_workers);
  4809     terminator()->reset_for_reuse(active_workers);
  4810     _n_workers = active_workers;
  4813   void work(uint worker_id) {
  4814     if (worker_id >= _n_workers) return;  // no work needed this round
  4816     double start_time_ms = os::elapsedTime() * 1000.0;
  4817     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
  4820       ResourceMark rm;
  4821       HandleMark   hm;
  4823       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  4825       G1ParScanThreadState            pss(_g1h, worker_id);
  4826       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
  4827       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  4828       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
  4830       pss.set_evac_closure(&scan_evac_cl);
  4831       pss.set_evac_failure_closure(&evac_failure_cl);
  4832       pss.set_partial_scan_closure(&partial_scan_cl);
  4834       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
  4835       G1ParScanPermClosure           only_scan_perm_cl(_g1h, &pss, rp);
  4837       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
  4838       G1ParScanAndMarkPermClosure    scan_mark_perm_cl(_g1h, &pss, rp);
  4840       OopClosure*                    scan_root_cl = &only_scan_root_cl;
  4841       OopsInHeapRegionClosure*       scan_perm_cl = &only_scan_perm_cl;
  4843       if (_g1h->g1_policy()->during_initial_mark_pause()) {
  4844         // We also need to mark copied objects.
  4845         scan_root_cl = &scan_mark_root_cl;
  4846         scan_perm_cl = &scan_mark_perm_cl;
  4849       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  4851       pss.start_strong_roots();
  4852       _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  4853                                     SharedHeap::SO_AllClasses,
  4854                                     scan_root_cl,
  4855                                     &push_heap_rs_cl,
  4856                                     scan_perm_cl,
  4857                                     worker_id);
  4858       pss.end_strong_roots();
  4861         double start = os::elapsedTime();
  4862         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4863         evac.do_void();
  4864         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4865         double term_ms = pss.term_time()*1000.0;
  4866         _g1h->g1_policy()->phase_times()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
  4867         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
  4869       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4870       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
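               // The "+ 1" skips entry 0 of the per-thread array (the non-young
               // bucket) so that only the per-region young counts are merged
               // into the global surviving-young-words table.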
  4872       if (ParallelGCVerbose) {
  4873         MutexLocker x(stats_lock());
  4874         pss.print_termination_stats(worker_id);
  4877       assert(pss.refs()->is_empty(), "should be empty");
  4879       // Close the inner scope so that the ResourceMark and HandleMark
  4880       // destructors are executed here and are included as part of the
  4881       // "GC Worker Time".
  4884     double end_time_ms = os::elapsedTime() * 1000.0;
  4885     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
  4887 };
  4889 // *** Common G1 Evacuation Stuff
  4891 // Closures that support the filtering of CodeBlobs scanned during
  4892 // external root scanning.
  4894 // Closure applied to reference fields in code blobs (specifically nmethods)
  4895 // to determine whether an nmethod contains references that point into
  4896 // the collection set. Used as a predicate when walking code roots so
  4897 // that only nmethods that point into the collection set are added to the
  4898 // 'marked' list.
  4900 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
  4902   class G1PointsIntoCSOopClosure : public OopClosure {
  4903     G1CollectedHeap* _g1;
  4904     bool _points_into_cs;
  4905   public:
  4906     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
  4907       _g1(g1), _points_into_cs(false) { }
  4909     bool points_into_cs() const { return _points_into_cs; }
  4911     template <class T>
  4912     void do_oop_nv(T* p) {
  4913       if (!_points_into_cs) {
  4914         T heap_oop = oopDesc::load_heap_oop(p);
  4915         if (!oopDesc::is_null(heap_oop) &&
  4916             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
  4917           _points_into_cs = true;
  4922     virtual void do_oop(oop* p)        { do_oop_nv(p); }
  4923     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  4924   };
  4926   G1CollectedHeap* _g1;
  4928 public:
  4929   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
  4930     CodeBlobToOopClosure(cl, true), _g1(g1) { }
  4932   virtual void do_code_blob(CodeBlob* cb) {
  4933     nmethod* nm = cb->as_nmethod_or_null();
  4934     if (nm != NULL && !(nm->test_oops_do_mark())) {
  4935       G1PointsIntoCSOopClosure predicate_cl(_g1);
  4936       nm->oops_do(&predicate_cl);
  4938       if (predicate_cl.points_into_cs()) {
  4939         // At least one of the reference fields or the oop relocations
  4940         // in the nmethod points into the collection set. We have to
  4941         // 'mark' this nmethod.
  4942         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
  4943         // or MarkingCodeBlobClosure::do_code_blob() change.
  4944         if (!nm->test_set_oops_do_mark()) {
  4945           do_newly_marked_nmethod(nm);
  4950 };
  4952 // This method is run in a GC worker.
  4954 void
  4955 G1CollectedHeap::
  4956 g1_process_strong_roots(bool collecting_perm_gen,
  4957                         ScanningOption so,
  4958                         OopClosure* scan_non_heap_roots,
  4959                         OopsInHeapRegionClosure* scan_rs,
  4960                         OopsInGenClosure* scan_perm,
  4961                         int worker_i) {
  4963   // First scan the strong roots, including the perm gen.
  4964   double ext_roots_start = os::elapsedTime();
  4965   double closure_app_time_sec = 0.0;
  4967   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  4968   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  4969   buf_scan_perm.set_generation(perm_gen());
  4971   // Walk the code cache w/o buffering, because StarTask cannot handle
  4972   // unaligned oop locations.
  4973   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
  4975   process_strong_roots(false, // no scoping; this is parallel code
  4976                        collecting_perm_gen, so,
  4977                        &buf_scan_non_heap_roots,
  4978                        &eager_scan_code_roots,
  4979                        &buf_scan_perm);
  4981   // Now the CM ref_processor roots.
  4982   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4983     // We need to treat the discovered reference lists of the
  4984     // concurrent mark ref processor as roots and keep entries
  4985     // (which are added by the marking threads) on them live
  4986     // until they can be processed at the end of marking.
  4987     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  4990   // Finish up any enqueued closure apps (attributed as object copy time).
  4991   buf_scan_non_heap_roots.done();
  4992   buf_scan_perm.done();
  4994   double ext_roots_end = os::elapsedTime();
  4996   g1_policy()->phase_times()->reset_obj_copy_time(worker_i);
  4997   double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
  4998                                 buf_scan_non_heap_roots.closure_app_seconds();
  4999   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
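           // The time spent applying the buffered closures is booked as object
           // copy time above; the remainder of the root-scanning interval is
           // what gets reported as external root scan time below.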
  5001   double ext_root_time_ms =
  5002     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  5004   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  5006   // During conc marking we have to filter the per-thread SATB buffers
  5007   // to make sure we remove any oops into the CSet (which will show up
  5008   // as implicitly live).
  5009   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
  5010     if (mark_in_progress()) {
  5011       JavaThread::satb_mark_queue_set().filter_thread_buffers();
  5014   double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  5015   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
  5017   // Now scan the complement of the collection set.
  5018   if (scan_rs != NULL) {
  5019     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  5022   _process_strong_tasks->all_tasks_completed();
  5025 void
  5026 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  5027                                        OopClosure* non_root_closure) {
  5028   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  5029   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  5032 // Weak Reference Processing support
  5034 // An always "is_alive" closure that is used to preserve referents.
  5035 // If the object is non-null then it's alive.  Used in the preservation
  5036 // of referent objects that are pointed to by reference objects
  5037 // discovered by the CM ref processor.
  5038 class G1AlwaysAliveClosure: public BoolObjectClosure {
  5039   G1CollectedHeap* _g1;
  5040 public:
  5041   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5042   void do_object(oop p) { assert(false, "Do not call."); }
  5043   bool do_object_b(oop p) {
  5044     if (p != NULL) {
  5045       return true;
  5047     return false;
  5049 };
  5051 bool G1STWIsAliveClosure::do_object_b(oop p) {
  5052   // An object is reachable if it is outside the collection set,
  5053   // or is inside and copied.
  5054   return !_g1->obj_in_cs(p) || p->is_forwarded();
  5057 // Non Copying Keep Alive closure
  5058 class G1KeepAliveClosure: public OopClosure {
  5059   G1CollectedHeap* _g1;
  5060 public:
  5061   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5062   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  5063   void do_oop(      oop* p) {
  5064     oop obj = *p;
  5066     if (_g1->obj_in_cs(obj)) {
  5067       assert( obj->is_forwarded(), "invariant" );
  5068       *p = obj->forwardee();
  5071 };
  5073 // Copying Keep Alive closure - can be called from both
  5074 // serial and parallel code as long as different worker
  5075 // threads utilize different G1ParScanThreadState instances
  5076 // and different queues.
  5078 class G1CopyingKeepAliveClosure: public OopClosure {
  5079   G1CollectedHeap*         _g1h;
  5080   OopClosure*              _copy_non_heap_obj_cl;
  5081   OopsInHeapRegionClosure* _copy_perm_obj_cl;
  5082   G1ParScanThreadState*    _par_scan_state;
  5084 public:
  5085   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
  5086                             OopClosure* non_heap_obj_cl,
  5087                             OopsInHeapRegionClosure* perm_obj_cl,
  5088                             G1ParScanThreadState* pss):
  5089     _g1h(g1h),
  5090     _copy_non_heap_obj_cl(non_heap_obj_cl),
  5091     _copy_perm_obj_cl(perm_obj_cl),
  5092     _par_scan_state(pss)
  5093   {}
  5095   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  5096   virtual void do_oop(      oop* p) { do_oop_work(p); }
  5098   template <class T> void do_oop_work(T* p) {
  5099     oop obj = oopDesc::load_decode_heap_oop(p);
  5101     if (_g1h->obj_in_cs(obj)) {
  5102       // If the referent object has been forwarded (either copied
  5103       // to a new location or to itself in the event of an
  5104       // evacuation failure) then we need to update the reference
  5105       // field and, if both reference and referent are in the G1
  5106       // heap, update the RSet for the referent.
  5107       //
  5108       // If the referent has not been forwarded then we have to keep
  5109       // it alive by policy. Therefore we have to copy the referent.
  5110       //
  5111       // If the reference field is in the G1 heap then we can push
  5112       // on the PSS queue. When the queue is drained (after each
  5113       // phase of reference processing) the object and its followers
  5114       // will be copied, the reference field set to point to the
  5115       // new location, and the RSet updated. Otherwise we need to
  5116       // use the non-heap or perm closures directly to copy
  5117       // the referent object and update the pointer, while avoiding
  5118       // updating the RSet.
  5120       if (_g1h->is_in_g1_reserved(p)) {
  5121         _par_scan_state->push_on_queue(p);
  5122       } else {
  5123         // The reference field is not in the G1 heap.
  5124         if (_g1h->perm_gen()->is_in(p)) {
  5125           _copy_perm_obj_cl->do_oop(p);
  5126         } else {
  5127           _copy_non_heap_obj_cl->do_oop(p);
  5132 };
  5134 // Serial drain queue closure. Called as the 'complete_gc'
  5135 // closure for each discovered list in some of the
  5136 // reference processing phases.
  5138 class G1STWDrainQueueClosure: public VoidClosure {
  5139 protected:
  5140   G1CollectedHeap* _g1h;
  5141   G1ParScanThreadState* _par_scan_state;
  5143   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  5145 public:
  5146   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
  5147     _g1h(g1h),
  5148     _par_scan_state(pss)
  5149   { }
  5151   void do_void() {
  5152     G1ParScanThreadState* const pss = par_scan_state();
  5153     pss->trim_queue();
  5155 };
  5157 // Parallel Reference Processing closures
  5159 // Implementation of AbstractRefProcTaskExecutor for parallel reference
  5160 // processing during G1 evacuation pauses.
  5162 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  5163 private:
  5164   G1CollectedHeap*   _g1h;
  5165   RefToScanQueueSet* _queues;
  5166   FlexibleWorkGang*  _workers;
  5167   int                _active_workers;
  5169 public:
  5170   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
  5171                         FlexibleWorkGang* workers,
  5172                         RefToScanQueueSet *task_queues,
  5173                         int n_workers) :
  5174     _g1h(g1h),
  5175     _queues(task_queues),
  5176     _workers(workers),
  5177     _active_workers(n_workers)
  5179     assert(n_workers > 0, "shouldn't call this otherwise");
  5182   // Executes the given task using concurrent marking worker threads.
  5183   virtual void execute(ProcessTask& task);
  5184   virtual void execute(EnqueueTask& task);
  5185 };
  5187 // Gang task for possibly parallel reference processing
  5189 class G1STWRefProcTaskProxy: public AbstractGangTask {
  5190   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  5191   ProcessTask&     _proc_task;
  5192   G1CollectedHeap* _g1h;
  5193   RefToScanQueueSet *_task_queues;
  5194   ParallelTaskTerminator* _terminator;
  5196 public:
  5197   G1STWRefProcTaskProxy(ProcessTask& proc_task,
  5198                      G1CollectedHeap* g1h,
  5199                      RefToScanQueueSet *task_queues,
  5200                      ParallelTaskTerminator* terminator) :
  5201     AbstractGangTask("Process reference objects in parallel"),
  5202     _proc_task(proc_task),
  5203     _g1h(g1h),
  5204     _task_queues(task_queues),
  5205     _terminator(terminator)
  5206   {}
  5208   virtual void work(uint worker_id) {
  5209     // The reference processing task executed by a single worker.
  5210     ResourceMark rm;
  5211     HandleMark   hm;
  5213     G1STWIsAliveClosure is_alive(_g1h);
  5215     G1ParScanThreadState pss(_g1h, worker_id);
  5217     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
  5218     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5219     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
  5221     pss.set_evac_closure(&scan_evac_cl);
  5222     pss.set_evac_failure_closure(&evac_failure_cl);
  5223     pss.set_partial_scan_closure(&partial_scan_cl);
  5225     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5226     G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
  5228     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5229     G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
  5231     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5232     OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
  5234     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5235       // We also need to mark copied objects.
  5236       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5237       copy_perm_cl = &copy_mark_perm_cl;
  5240     // Keep alive closure.
  5241     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
  5243     // Complete GC closure
  5244     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
  5246     // Call the reference processing task's work routine.
  5247     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
  5249     // Note we cannot assert that the refs array is empty here as not all
  5250     // of the processing tasks (specifically phase2 - pp2_work) execute
  5251     // the complete_gc closure (which ordinarily would drain the queue), so
  5252     // the queue may not be empty.
  5254 };
  5256 // Driver routine for parallel reference processing.
  5257 // Creates an instance of the ref processing gang
  5258 // task and has the worker threads execute it.
  5259 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  5260   assert(_workers != NULL, "Need parallel worker threads.");
  5262   ParallelTaskTerminator terminator(_active_workers, _queues);
  5263   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
  5265   _g1h->set_par_threads(_active_workers);
  5266   _workers->run_task(&proc_task_proxy);
  5267   _g1h->set_par_threads(0);
  5270 // Gang task for parallel reference enqueueing.
  5272 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
  5273   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  5274   EnqueueTask& _enq_task;
  5276 public:
  5277   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  5278     AbstractGangTask("Enqueue reference objects in parallel"),
  5279     _enq_task(enq_task)
  5280   { }
  5282   virtual void work(uint worker_id) {
  5283     _enq_task.work(worker_id);
  5285 };
  5287 // Driver routine for parallel reference enqueueing.
  5288 // Creates an instance of the ref enqueueing gang
  5289 // task and has the worker threads execute it.
  5291 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  5292   assert(_workers != NULL, "Need parallel worker threads.");
  5294   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
  5296   _g1h->set_par_threads(_active_workers);
  5297   _workers->run_task(&enq_task_proxy);
  5298   _g1h->set_par_threads(0);
  5301 // End of weak reference support closures
  5303 // Abstract task used to preserve (i.e. copy) any referent objects
  5304 // that are in the collection set and are pointed to by reference
  5305 // objects discovered by the CM ref processor.
  5307 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
  5308 protected:
  5309   G1CollectedHeap* _g1h;
  5310   RefToScanQueueSet      *_queues;
  5311   ParallelTaskTerminator _terminator;
  5312   uint _n_workers;
  5314 public:
  5315   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
  5316     AbstractGangTask("ParPreserveCMReferents"),
  5317     _g1h(g1h),
  5318     _queues(task_queues),
  5319     _terminator(workers, _queues),
  5320     _n_workers(workers)
  5321   { }
  5323   void work(uint worker_id) {
  5324     ResourceMark rm;
  5325     HandleMark   hm;
  5327     G1ParScanThreadState            pss(_g1h, worker_id);
  5328     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
  5329     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5330     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
  5332     pss.set_evac_closure(&scan_evac_cl);
  5333     pss.set_evac_failure_closure(&evac_failure_cl);
  5334     pss.set_partial_scan_closure(&partial_scan_cl);
  5336     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  5339     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5340     G1ParScanPermClosure           only_copy_perm_cl(_g1h, &pss, NULL);
  5342     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5343     G1ParScanAndMarkPermClosure    copy_mark_perm_cl(_g1h, &pss, NULL);
  5345     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5346     OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
  5348     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5349       // We also need to mark copied objects.
  5350       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5351       copy_perm_cl = &copy_mark_perm_cl;
  5354     // Is alive closure
  5355     G1AlwaysAliveClosure always_alive(_g1h);
  5357     // Copying keep alive closure. Applied to referent objects that need
  5358     // to be copied.
  5359     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
  5361     ReferenceProcessor* rp = _g1h->ref_processor_cm();
  5363     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
  5364     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
  5366     // limit is set using max_num_q() - which was set using ParallelGCThreads.
  5367     // So this must be true - but assert just in case someone decides to
  5368     // change the worker ids.
  5369     assert(worker_id < limit, "sanity");  // worker_id is unsigned, so only the upper bound needs checking
  5370     assert(!rp->discovery_is_atomic(), "check this code");
  5372     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
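           // For example (illustrative numbers only): with _n_workers == 4 and
           // limit == 16, the stride is 4 and worker 1 visits lists 1, 5, 9 and 13.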
  5373     for (uint idx = worker_id; idx < limit; idx += stride) {
  5374       DiscoveredList& ref_list = rp->discovered_refs()[idx];
  5376       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
  5377       while (iter.has_next()) {
  5378         // Since discovery is not atomic for the CM ref processor, we
  5379         // can see some null referent objects.
  5380         iter.load_ptrs(DEBUG_ONLY(true));
  5381         oop ref = iter.obj();
  5383         // This will filter nulls.
  5384         if (iter.is_referent_alive()) {
  5385           iter.make_referent_alive();
  5387         iter.move_to_next();
  5391     // Drain the queue - which may cause stealing
  5392     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
  5393     drain_queue.do_void();
  5394     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
  5395     assert(pss.refs()->is_empty(), "should be");
  5397 };
  5399 // Weak Reference processing during an evacuation pause (part 1).
  5400 void G1CollectedHeap::process_discovered_references() {
  5401   double ref_proc_start = os::elapsedTime();
  5403   ReferenceProcessor* rp = _ref_processor_stw;
  5404   assert(rp->discovery_enabled(), "should have been enabled");
  5406   // Any reference objects, in the collection set, that were 'discovered'
  5407   // by the CM ref processor should have already been copied (either by
  5408   // applying the external root copy closure to the discovered lists, or
  5409   // by following an RSet entry).
  5410   //
  5411   // But some of the referents that these reference objects point to,
  5412   // and that are in the collection set, may not have been copied: the STW ref
  5413   // processor would have seen that the reference object had already
  5414   // been 'discovered' and would have skipped discovering the reference,
  5415   // but would not have treated the reference object as a regular oop.
  5416   // As a result the copy closure would not have been applied to the
  5417   // referent object.
  5418   //
  5419   // We need to explicitly copy these referent objects - the references
  5420   // will be processed at the end of remarking.
  5421   //
  5422   // We also need to do this copying before we process the reference
  5423   // objects discovered by the STW ref processor in case one of these
  5424   // referents points to another object which is also referenced by an
  5425   // object discovered by the STW ref processor.
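         // In short: first preserve (copy) the CM-discovered referents in parallel,
         // then run the STW reference processing proper with the closures set up below.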
  5427   uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  5428                         workers()->active_workers() : 1);
  5430   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
  5431            active_workers == workers()->active_workers(),
  5432            "Need to reset active_workers");
  5434   set_par_threads(active_workers);
  5435   G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
  5437   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5438     workers()->run_task(&keep_cm_referents);
  5439   } else {
  5440     keep_cm_referents.work(0);
  5443   set_par_threads(0);
  5445   // Closure to test whether a referent is alive.
  5446   G1STWIsAliveClosure is_alive(this);
  5448   // Even when parallel reference processing is enabled, the processing
  5449   // of JNI refs is always serial and performed by the current thread
  5450   // rather than by a worker. The following PSS will be used for processing
  5451   // JNI refs.
  5453   // Use only a single queue for this PSS.
  5454   G1ParScanThreadState pss(this, 0);
  5456   // We do not embed a reference processor in the copying/scanning
  5457   // closures while we're actually processing the discovered
  5458   // reference objects.
  5459   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
  5460   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
  5461   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
  5463   pss.set_evac_closure(&scan_evac_cl);
  5464   pss.set_evac_failure_closure(&evac_failure_cl);
  5465   pss.set_partial_scan_closure(&partial_scan_cl);
  5467   assert(pss.refs()->is_empty(), "pre-condition");
  5469   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
  5470   G1ParScanPermClosure           only_copy_perm_cl(this, &pss, NULL);
  5472   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  5473   G1ParScanAndMarkPermClosure    copy_mark_perm_cl(this, &pss, NULL);
  5475   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5476   OopsInHeapRegionClosure*       copy_perm_cl = &only_copy_perm_cl;
  5478   if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5479     // We also need to mark copied objects.
  5480     copy_non_heap_cl = &copy_mark_non_heap_cl;
  5481     copy_perm_cl = &copy_mark_perm_cl;
  5484   // Keep alive closure.
  5485   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
  5487   // Serial Complete GC closure
  5488   G1STWDrainQueueClosure drain_queue(this, &pss);
  5490   // Set up the soft refs policy...
  5491   rp->setup_policy(false);
  5493   if (!rp->processing_is_mt()) {
  5494     // Serial reference processing...
  5495     rp->process_discovered_references(&is_alive,
  5496                                       &keep_alive,
  5497                                       &drain_queue,
  5498                                       NULL);
  5499   } else {
  5500     // Parallel reference processing
  5501     assert(rp->num_q() == active_workers, "sanity");
  5502     assert(active_workers <= rp->max_num_q(), "sanity");
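           // The executor hands the reference processor's ProcessTask/EnqueueTask
           // work off to the worker gang via the proxy tasks defined earlier in this file.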
  5504     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
  5505     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
  5508   // We have completed copying any necessary live referent objects
  5509   // (that were not copied during the actual pause) so we can
  5510   // retire any active alloc buffers
  5511   pss.retire_alloc_buffers();
  5512   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  5514   double ref_proc_time = os::elapsedTime() - ref_proc_start;
  5515   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
  5518 // Weak Reference processing during an evacuation pause (part 2).
  5519 void G1CollectedHeap::enqueue_discovered_references() {
  5520   double ref_enq_start = os::elapsedTime();
  5522   ReferenceProcessor* rp = _ref_processor_stw;
  5523   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
  5525   // Now enqueue any references remaining on the discovered lists
  5526   // onto the pending list.
  5527   if (!rp->processing_is_mt()) {
  5528     // Serial reference processing...
  5529     rp->enqueue_discovered_references();
  5530   } else {
  5531     // Parallel reference enqueuing
  5533     uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
  5534     assert(active_workers == workers()->active_workers(),
  5535            "Need to reset active_workers");
  5536     assert(rp->num_q() == active_workers, "sanity");
  5537     assert(active_workers <= rp->max_num_q(), "sanity");
  5539     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
  5540     rp->enqueue_discovered_references(&par_task_executor);
  5543   rp->verify_no_references_recorded();
  5544   assert(!rp->discovery_enabled(), "should have been disabled");
  5546   // FIXME
  5547   // CM's reference processing also cleans up the string and symbol tables.
  5548   // Should we do that here also? We could, but it is a serial operation
  5549   // and could significantly increase the pause time.
  5551   double ref_enq_time = os::elapsedTime() - ref_enq_start;
  5552   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
  5555 void G1CollectedHeap::evacuate_collection_set() {
  5556   _expand_heap_after_alloc_failure = true;
  5557   set_evacuation_failed(false);
  5559   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  5560   concurrent_g1_refine()->set_use_cache(false);
  5561   concurrent_g1_refine()->clear_hot_cache_claimed_index();
  5563   uint n_workers;
  5564   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5565     n_workers =
  5566       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  5567                                      workers()->active_workers(),
  5568                                      Threads::number_of_non_daemon_threads());
  5569     assert(UseDynamicNumberOfGCThreads ||
  5570            n_workers == workers()->total_workers(),
  5571            "If not dynamic should be using all the workers");
  5572     workers()->set_active_workers(n_workers);
  5573     set_par_threads(n_workers);
  5574   } else {
  5575     assert(n_par_threads() == 0,
  5576            "Should be the original non-parallel value");
  5577     n_workers = 1;
  5580   G1ParTask g1_par_task(this, _task_queues);
  5582   init_for_evac_failure(NULL);
  5584   rem_set()->prepare_for_younger_refs_iterate(true);
  5586   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  5587   double start_par_time_sec = os::elapsedTime();
  5588   double end_par_time_sec;
  5591     StrongRootsScope srs(this);
  5593     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5594       // The individual threads will set their evac-failure closures.
  5595       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  5596       // These tasks use SharedHeap::_process_strong_tasks
  5597       assert(UseDynamicNumberOfGCThreads ||
  5598              workers()->active_workers() == workers()->total_workers(),
  5599              "If not dynamic should be using all the workers");
  5600       workers()->run_task(&g1_par_task);
  5601     } else {
  5602       g1_par_task.set_for_termination(n_workers);
  5603       g1_par_task.work(0);
  5605     end_par_time_sec = os::elapsedTime();
  5607     // Closing the inner scope will execute the destructor
  5608     // for the StrongRootsScope object. We record the current
  5609     // elapsed time before closing the scope so that time
  5610     // taken for the SRS destructor is NOT included in the
  5611     // reported parallel time.
  5614   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  5615   g1_policy()->phase_times()->record_par_time(par_time_ms);
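         // The time from end_par_time_sec until the measurement below (which
         // includes the StrongRootsScope destructor) is reported as code root fixup.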
  5617   double code_root_fixup_time_ms =
  5618         (os::elapsedTime() - end_par_time_sec) * 1000.0;
  5619   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
  5621   set_par_threads(0);
  5623   // Process any discovered reference objects - we have
  5624   // to do this _before_ we retire the GC alloc regions
  5625   // as we may have to copy some 'reachable' referent
  5626   // objects (and their reachable sub-graphs) that were
  5627   // not copied during the pause.
  5628   process_discovered_references();
  5630   // Weak root processing.
  5631   // Note: when JSR 292 is enabled and code blobs can contain
  5632   // non-perm oops then we will need to process the code blobs
  5633   // here too.
  5635     G1STWIsAliveClosure is_alive(this);
  5636     G1KeepAliveClosure keep_alive(this);
  5637     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  5640   release_gc_alloc_regions();
  5641   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  5643   concurrent_g1_refine()->clear_hot_cache();
  5644   concurrent_g1_refine()->set_use_cache(true);
  5646   finalize_for_evac_failure();
  5648   if (evacuation_failed()) {
  5649     remove_self_forwarding_pointers();
  5650     if (G1Log::finer()) {
  5651       gclog_or_tty->print(" (to-space exhausted)");
  5652     } else if (G1Log::fine()) {
  5653       gclog_or_tty->print("--");
  5657   // Enqueue any references remaining on the STW
  5658   // reference processor's discovered lists. We need to do
  5659   // this after the card table is cleaned (and verified) as
  5660   // the act of enqueuing entries onto the pending list
  5661   // will log these updates (and dirty their associated
  5662   // cards). We need these updates logged to update any
  5663   // RSets.
  5664   enqueue_discovered_references();
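         // With G1DeferredRSUpdate the card updates logged above are redirtied and
         // the buffers merged back into the shared dirty card queue set for later
         // processing.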
  5666   if (G1DeferredRSUpdate) {
  5667     RedirtyLoggedCardTableEntryFastClosure redirty;
  5668     dirty_card_queue_set().set_closure(&redirty);
  5669     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  5671     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  5672     dcq.merge_bufferlists(&dirty_card_queue_set());
  5673     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  5675   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  5678 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
  5679                                      size_t* pre_used,
  5680                                      FreeRegionList* free_list,
  5681                                      OldRegionSet* old_proxy_set,
  5682                                      HumongousRegionSet* humongous_proxy_set,
  5683                                      HRRSCleanupTask* hrrs_cleanup_task,
  5684                                      bool par) {
  5685   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  5686     if (hr->isHumongous()) {
  5687       assert(hr->startsHumongous(), "we should only see starts humongous");
  5688       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
  5689     } else {
  5690       _old_set.remove_with_proxy(hr, old_proxy_set);
  5691       free_region(hr, pre_used, free_list, par);
  5693   } else {
  5694     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
  5698 void G1CollectedHeap::free_region(HeapRegion* hr,
  5699                                   size_t* pre_used,
  5700                                   FreeRegionList* free_list,
  5701                                   bool par) {
  5702   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  5703   assert(!hr->is_empty(), "the region should not be empty");
  5704   assert(free_list != NULL, "pre-condition");
  5706   *pre_used += hr->used();
  5707   hr->hr_clear(par, true /* clear_space */);
  5708   free_list->add_as_head(hr);
  5711 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  5712                                      size_t* pre_used,
  5713                                      FreeRegionList* free_list,
  5714                                      HumongousRegionSet* humongous_proxy_set,
  5715                                      bool par) {
  5716   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  5717   assert(free_list != NULL, "pre-condition");
  5718   assert(humongous_proxy_set != NULL, "pre-condition");
  5720   size_t hr_used = hr->used();
  5721   size_t hr_capacity = hr->capacity();
  5722   size_t hr_pre_used = 0;
  5723   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  5724   // We need to read this before we make the region non-humongous,
  5725   // otherwise the information will be gone.
  5726   uint last_index = hr->last_hc_index();
  5727   hr->set_notHumongous();
  5728   free_region(hr, &hr_pre_used, free_list, par);
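         // Now free the 'continues humongous' regions that trail the 'starts
         // humongous' region, i.e. regions [hrs_index() + 1, last_index).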
  5730   uint i = hr->hrs_index() + 1;
  5731   while (i < last_index) {
  5732     HeapRegion* curr_hr = region_at(i);
  5733     assert(curr_hr->continuesHumongous(), "invariant");
  5734     curr_hr->set_notHumongous();
  5735     free_region(curr_hr, &hr_pre_used, free_list, par);
  5736     i += 1;
  5738   assert(hr_pre_used == hr_used,
  5739          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
  5740                  "should be the same", hr_pre_used, hr_used));
  5741   *pre_used += hr_pre_used;
  5744 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
  5745                                        FreeRegionList* free_list,
  5746                                        OldRegionSet* old_proxy_set,
  5747                                        HumongousRegionSet* humongous_proxy_set,
  5748                                        bool par) {
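         // When 'par' is true this can be called by multiple threads, so the update
         // of _summary_bytes_used below is guarded by ParGCRareEvent_lock.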
  5749   if (pre_used > 0) {
  5750     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
  5751     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  5752     assert(_summary_bytes_used >= pre_used,
  5753            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
  5754                    "should be >= pre_used: "SIZE_FORMAT,
  5755                    _summary_bytes_used, pre_used));
  5756     _summary_bytes_used -= pre_used;
  5758   if (free_list != NULL && !free_list->is_empty()) {
  5759     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  5760     _free_list.add_as_head(free_list);
  5762   if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
  5763     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  5764     _old_set.update_from_proxy(old_proxy_set);
  5766   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
  5767     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  5768     _humongous_set.update_from_proxy(humongous_proxy_set);
  5772 class G1ParCleanupCTTask : public AbstractGangTask {
  5773   CardTableModRefBS* _ct_bs;
  5774   G1CollectedHeap* _g1h;
  5775   HeapRegion* volatile _su_head;
  5776 public:
  5777   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  5778                      G1CollectedHeap* g1h) :
  5779     AbstractGangTask("G1 Par Cleanup CT Task"),
  5780     _ct_bs(ct_bs), _g1h(g1h) { }
  5782   void work(uint worker_id) {
  5783     HeapRegion* r;
  5784     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  5785       clear_cards(r);
  5789   void clear_cards(HeapRegion* r) {
  5790     // Cards of the survivors should have already been dirtied.
  5791     if (!r->is_survivor()) {
  5792       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  5795 };
  5797 #ifndef PRODUCT
  5798 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  5799   G1CollectedHeap* _g1h;
  5800   CardTableModRefBS* _ct_bs;
  5801 public:
  5802   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
  5803     : _g1h(g1h), _ct_bs(ct_bs) { }
  5804   virtual bool doHeapRegion(HeapRegion* r) {
  5805     if (r->is_survivor()) {
  5806       _g1h->verify_dirty_region(r);
  5807     } else {
  5808       _g1h->verify_not_dirty_region(r);
  5810     return false;
  5812 };
  5814 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
  5815   // All of the region should be clean.
  5816   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
  5817   MemRegion mr(hr->bottom(), hr->end());
  5818   ct_bs->verify_not_dirty_region(mr);
  5821 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
  5822   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  5823   // dirty allocated blocks as they allocate them. The thread that
  5824   // retires each region and replaces it with a new one will do a
  5825   // maximal allocation to fill in [pre_dummy_top(),end()] but will
  5826   // not dirty that area (one less thing to have to do while holding
  5827   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  5828   // is dirty.
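         // In other words, only [bottom(),pre_dummy_top()] is verified here;
         // [pre_dummy_top(),end()] is deliberately left unchecked.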
  5829   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
  5830   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  5831   ct_bs->verify_dirty_region(mr);
  5834 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
  5835   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
  5836   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
  5837     verify_dirty_region(hr);
  5841 void G1CollectedHeap::verify_dirty_young_regions() {
  5842   verify_dirty_young_list(_young_list->first_region());
  5844 #endif
  5846 void G1CollectedHeap::cleanUpCardTable() {
  5847   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  5848   double start = os::elapsedTime();
  5851     // Iterate over the dirty cards region list.
  5852     G1ParCleanupCTTask cleanup_task(ct_bs, this);
  5854     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5855       set_par_threads();
  5856       workers()->run_task(&cleanup_task);
  5857       set_par_threads(0);
  5858     } else {
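             // Walk the dirty cards region list directly. A region whose 'next'
             // pointer refers back to itself marks the end of the list (see the
             // check against 'r' below).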
  5859       while (_dirty_cards_region_list) {
  5860         HeapRegion* r = _dirty_cards_region_list;
  5861         cleanup_task.clear_cards(r);
  5862         _dirty_cards_region_list = r->get_next_dirty_cards_region();
  5863         if (_dirty_cards_region_list == r) {
  5864           // The last region.
  5865           _dirty_cards_region_list = NULL;
  5867         r->set_next_dirty_cards_region(NULL);
  5870 #ifndef PRODUCT
  5871     if (G1VerifyCTCleanup || VerifyAfterGC) {
  5872       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
  5873       heap_region_iterate(&cleanup_verifier);
  5875 #endif
  5878   double elapsed = os::elapsedTime() - start;
  5879   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
  5882 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  5883   size_t pre_used = 0;
  5884   FreeRegionList local_free_list("Local List for CSet Freeing");
  5886   double young_time_ms     = 0.0;
  5887   double non_young_time_ms = 0.0;
  5889   // Since the collection set is a superset of the young list,
  5890   // all we need to do to clear the young list is clear its
  5891   // head and length, and unlink any young regions in the code below
  5892   _young_list->clear();
  5894   G1CollectorPolicy* policy = g1_policy();
  5896   double start_sec = os::elapsedTime();
  5897   bool non_young = true;
  5899   HeapRegion* cur = cs_head;
  5900   int age_bound = -1;
  5901   size_t rs_lengths = 0;
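         // Walk the collection set, splitting the elapsed time between the young
         // and non-young portions; 'non_young' tracks which portion is currently
         // being timed.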
  5903   while (cur != NULL) {
  5904     assert(!is_on_master_free_list(cur), "sanity");
  5905     if (non_young) {
  5906       if (cur->is_young()) {
  5907         double end_sec = os::elapsedTime();
  5908         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5909         non_young_time_ms += elapsed_ms;
  5911         start_sec = os::elapsedTime();
  5912         non_young = false;
  5914     } else {
  5915       if (!cur->is_young()) {
  5916         double end_sec = os::elapsedTime();
  5917         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5918         young_time_ms += elapsed_ms;
  5920         start_sec = os::elapsedTime();
  5921         non_young = true;
  5925     rs_lengths += cur->rem_set()->occupied();
  5927     HeapRegion* next = cur->next_in_collection_set();
  5928     assert(cur->in_collection_set(), "bad CS");
  5929     cur->set_next_in_collection_set(NULL);
  5930     cur->set_in_collection_set(false);
  5932     if (cur->is_young()) {
  5933       int index = cur->young_index_in_cset();
  5934       assert(index != -1, "invariant");
  5935       assert((uint) index < policy->young_cset_region_length(), "invariant");
  5936       size_t words_survived = _surviving_young_words[index];
  5937       cur->record_surv_words_in_group(words_survived);
  5939       // At this point we have 'popped' cur from the collection set
  5940       // (linked via next_in_collection_set()) but it is still in the
  5941       // young list (linked via next_young_region()). Clear the
  5942       // _next_young_region field.
  5943       cur->set_next_young_region(NULL);
  5944     } else {
  5945       int index = cur->young_index_in_cset();
  5946       assert(index == -1, "invariant");
  5949     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  5950             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5951             "invariant" );
  5953     if (!cur->evacuation_failed()) {
  5954       MemRegion used_mr = cur->used_region();
  5956       // The region should not be empty at this point.
  5957       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
  5958       free_region(cur, &pre_used, &local_free_list, false /* par */);
  5959     } else {
  5960       cur->uninstall_surv_rate_group();
  5961       if (cur->is_young()) {
  5962         cur->set_young_index_in_cset(-1);
  5964       cur->set_not_young();
  5965       cur->set_evacuation_failed(false);
  5966       // The region is now considered to be old.
  5967       _old_set.add(cur);
  5969     cur = next;
  5972   policy->record_max_rs_lengths(rs_lengths);
  5973   policy->cset_regions_freed();
  5975   double end_sec = os::elapsedTime();
  5976   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5978   if (non_young) {
  5979     non_young_time_ms += elapsed_ms;
  5980   } else {
  5981     young_time_ms += elapsed_ms;
  5984   update_sets_after_freeing_regions(pre_used, &local_free_list,
  5985                                     NULL /* old_proxy_set */,
  5986                                     NULL /* humongous_proxy_set */,
  5987                                     false /* par */);
  5988   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
  5989   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
  5992 // This routine is similar to the above but does not record
  5993 // any policy statistics or update free lists; we are abandoning
  5994 // the current incremental collection set in preparation for a
  5995 // full collection. After the full GC we will start to build up
  5996 // the incremental collection set again.
  5997 // This is only called when we're doing a full collection
  5998 // and is immediately followed by the tearing down of the young list.
  6000 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  6001   HeapRegion* cur = cs_head;
  6003   while (cur != NULL) {
  6004     HeapRegion* next = cur->next_in_collection_set();
  6005     assert(cur->in_collection_set(), "bad CS");
  6006     cur->set_next_in_collection_set(NULL);
  6007     cur->set_in_collection_set(false);
  6008     cur->set_young_index_in_cset(-1);
  6009     cur = next;
  6013 void G1CollectedHeap::set_free_regions_coming() {
  6014   if (G1ConcRegionFreeingVerbose) {
  6015     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6016                            "setting free regions coming");
  6019   assert(!free_regions_coming(), "pre-condition");
  6020   _free_regions_coming = true;
  6023 void G1CollectedHeap::reset_free_regions_coming() {
  6024   assert(free_regions_coming(), "pre-condition");
  6027     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6028     _free_regions_coming = false;
  6029     SecondaryFreeList_lock->notify_all();
  6032   if (G1ConcRegionFreeingVerbose) {
  6033     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6034                            "reset free regions coming");
  6038 void G1CollectedHeap::wait_while_free_regions_coming() {
  6039   // Most of the time we won't have to wait, so let's do a quick test
  6040   // first before we take the lock.
  6041   if (!free_regions_coming()) {
  6042     return;
  6045   if (G1ConcRegionFreeingVerbose) {
  6046     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6047                            "waiting for free regions");
  6051     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6052     while (free_regions_coming()) {
  6053       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  6057   if (G1ConcRegionFreeingVerbose) {
  6058     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6059                            "done waiting for free regions");
  6063 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  6064   assert(heap_lock_held_for_gc(),
  6065               "the heap lock should already be held by or for this thread");
  6066   _young_list->push_region(hr);
  6069 class NoYoungRegionsClosure: public HeapRegionClosure {
  6070 private:
  6071   bool _success;
  6072 public:
  6073   NoYoungRegionsClosure() : _success(true) { }
  6074   bool doHeapRegion(HeapRegion* r) {
  6075     if (r->is_young()) {
  6076       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  6077                              r->bottom(), r->end());
  6078       _success = false;
  6080     return false;
  6082   bool success() { return _success; }
  6083 };
  6085 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  6086   bool ret = _young_list->check_list_empty(check_sample);
  6088   if (check_heap) {
  6089     NoYoungRegionsClosure closure;
  6090     heap_region_iterate(&closure);
  6091     ret = ret && closure.success();
  6094   return ret;
  6097 class TearDownRegionSetsClosure : public HeapRegionClosure {
  6098 private:
  6099   OldRegionSet *_old_set;
  6101 public:
  6102   TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
  6104   bool doHeapRegion(HeapRegion* r) {
  6105     if (r->is_empty()) {
  6106       // We ignore empty regions, we'll empty the free list afterwards
  6107     } else if (r->is_young()) {
  6108       // We ignore young regions, we'll empty the young list afterwards
  6109     } else if (r->isHumongous()) {
  6110       // We ignore humongous regions, we're not tearing down the
  6111       // humongous region set
  6112     } else {
  6113       // The rest should be old
  6114       _old_set->remove(r);
  6116     return false;
  6119   ~TearDownRegionSetsClosure() {
  6120     assert(_old_set->is_empty(), "post-condition");
  6122 };
  6124 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  6125   assert_at_safepoint(true /* should_be_vm_thread */);
  6127   if (!free_list_only) {
  6128     TearDownRegionSetsClosure cl(&_old_set);
  6129     heap_region_iterate(&cl);
  6131     // Need to do this after the heap iteration to be able to
  6132     // recognize the young regions and ignore them during the iteration.
  6133     _young_list->empty_list();
  6135   _free_list.remove_all();
  6138 class RebuildRegionSetsClosure : public HeapRegionClosure {
  6139 private:
  6140   bool            _free_list_only;
  6141   OldRegionSet*   _old_set;
  6142   FreeRegionList* _free_list;
  6143   size_t          _total_used;
  6145 public:
  6146   RebuildRegionSetsClosure(bool free_list_only,
  6147                            OldRegionSet* old_set, FreeRegionList* free_list) :
  6148     _free_list_only(free_list_only),
  6149     _old_set(old_set), _free_list(free_list), _total_used(0) {
  6150     assert(_free_list->is_empty(), "pre-condition");
  6151     if (!free_list_only) {
  6152       assert(_old_set->is_empty(), "pre-condition");
  6156   bool doHeapRegion(HeapRegion* r) {
  6157     if (r->continuesHumongous()) {
  6158       return false;
  6161     if (r->is_empty()) {
  6162       // Add free regions to the free list
  6163       _free_list->add_as_tail(r);
  6164     } else if (!_free_list_only) {
  6165       assert(!r->is_young(), "we should not come across young regions");
  6167       if (r->isHumongous()) {
  6168         // We ignore humongous regions, we left the humongous set unchanged
  6169       } else {
  6170         // The rest should be old, add them to the old set
  6171         _old_set->add(r);
  6173       _total_used += r->used();
  6176     return false;
  6179   size_t total_used() {
  6180     return _total_used;
  6182 };
  6184 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  6185   assert_at_safepoint(true /* should_be_vm_thread */);
  6187   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
  6188   heap_region_iterate(&cl);
  6190   if (!free_list_only) {
  6191     _summary_bytes_used = cl.total_used();
  6193   assert(_summary_bytes_used == recalculate_used(),
  6194          err_msg("inconsistent _summary_bytes_used, "
  6195                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
  6196                  _summary_bytes_used, recalculate_used()));
  6199 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  6200   _refine_cte_cl->set_concurrent(concurrent);
  6203 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  6204   HeapRegion* hr = heap_region_containing(p);
  6205   if (hr == NULL) {
  6206     return is_in_permanent(p);
  6207   } else {
  6208     return hr->is_in(p);
  6212 // Methods for the mutator alloc region
  6214 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
  6215                                                       bool force) {
  6216   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6217   assert(!force || g1_policy()->can_expand_young_list(),
  6218          "if force is true we should be able to expand the young list");
  6219   bool young_list_full = g1_policy()->is_young_list_full();
  6220   if (force || !young_list_full) {
  6221     HeapRegion* new_alloc_region = new_region(word_size,
  6222                                               false /* do_expand */);
  6223     if (new_alloc_region != NULL) {
  6224       set_region_short_lived_locked(new_alloc_region);
  6225       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
  6226       return new_alloc_region;
  6229   return NULL;
  6232 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  6233                                                   size_t allocated_bytes) {
  6234   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6235   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  6237   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  6238   _summary_bytes_used += allocated_bytes;
  6239   _hr_printer.retire(alloc_region);
  6240   // We update the eden sizes here, when the region is retired,
  6241   // instead of when it's allocated, since this is the point at which its
  6242   // used space has been recorded in _summary_bytes_used.
  6243   g1mm()->update_eden_size();
  6246 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  6247                                                     bool force) {
  6248   return _g1h->new_mutator_alloc_region(word_size, force);
  6251 void G1CollectedHeap::set_par_threads() {
  6252   // Don't change the number of workers.  Use the value previously set
  6253   // in the workgroup.
  6254   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
  6255   uint n_workers = workers()->active_workers();
  6256   assert(UseDynamicNumberOfGCThreads ||
  6257            n_workers == workers()->total_workers(),
  6258       "Otherwise should be using the total number of workers");
  6259   if (n_workers == 0) {
  6260     assert(false, "Should have been set in prior evacuation pause.");
  6261     n_workers = ParallelGCThreads;
  6262     workers()->set_active_workers(n_workers);
  6264   set_par_threads(n_workers);
  6267 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  6268                                        size_t allocated_bytes) {
  6269   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  6272 // Methods for the GC alloc regions
  6274 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
  6275                                                  uint count,
  6276                                                  GCAllocPurpose ap) {
  6277   assert(FreeList_lock->owned_by_self(), "pre-condition");
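         // Only hand out a new GC alloc region if we have not yet reached the
         // policy limit on regions for this allocation purpose (survivor or old).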
  6279   if (count < g1_policy()->max_regions(ap)) {
  6280     HeapRegion* new_alloc_region = new_region(word_size,
  6281                                               true /* do_expand */);
  6282     if (new_alloc_region != NULL) {
  6283       // We really only need to do this for old regions given that we
  6284       // should never scan survivors. But it doesn't hurt to do it
  6285       // for survivors too.
  6286       new_alloc_region->set_saved_mark();
  6287       if (ap == GCAllocForSurvived) {
  6288         new_alloc_region->set_survivor();
  6289         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
  6290       } else {
  6291         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
  6293       bool during_im = g1_policy()->during_initial_mark_pause();
  6294       new_alloc_region->note_start_of_copying(during_im);
  6295       return new_alloc_region;
  6296     } else {
  6297       g1_policy()->note_alloc_region_limit_reached(ap);
  6300   return NULL;
  6303 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  6304                                              size_t allocated_bytes,
  6305                                              GCAllocPurpose ap) {
  6306   bool during_im = g1_policy()->during_initial_mark_pause();
  6307   alloc_region->note_end_of_copying(during_im);
  6308   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  6309   if (ap == GCAllocForSurvived) {
  6310     young_list()->add_survivor_region(alloc_region);
  6311   } else {
  6312     _old_set.add(alloc_region);
  6314   _hr_printer.retire(alloc_region);
  6317 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
  6318                                                        bool force) {
  6319   assert(!force, "not supported for GC alloc regions");
  6320   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
  6323 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6324                                           size_t allocated_bytes) {
  6325   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6326                                GCAllocForSurvived);
  6329 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
  6330                                                   bool force) {
  6331   assert(!force, "not supported for GC alloc regions");
  6332   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
  6335 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6336                                      size_t allocated_bytes) {
  6337   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6338                                GCAllocForTenured);
  6340 // Heap region set verification
  6342 class VerifyRegionListsClosure : public HeapRegionClosure {
  6343 private:
  6344   FreeRegionList*     _free_list;
  6345   OldRegionSet*       _old_set;
  6346   HumongousRegionSet* _humongous_set;
  6347   uint                _region_count;
  6349 public:
  6350   VerifyRegionListsClosure(OldRegionSet* old_set,
  6351                            HumongousRegionSet* humongous_set,
  6352                            FreeRegionList* free_list) :
  6353     _old_set(old_set), _humongous_set(humongous_set),
  6354     _free_list(free_list), _region_count(0) { }
  6356   uint region_count() { return _region_count; }
  6358   bool doHeapRegion(HeapRegion* hr) {
  6359     _region_count += 1;
  6361     if (hr->continuesHumongous()) {
  6362       return false;
  6365     if (hr->is_young()) {
  6366       // TODO
  6367     } else if (hr->startsHumongous()) {
  6368       _humongous_set->verify_next_region(hr);
  6369     } else if (hr->is_empty()) {
  6370       _free_list->verify_next_region(hr);
  6371     } else {
  6372       _old_set->verify_next_region(hr);
  6374     return false;
  6376 };
  6378 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
  6379                                              HeapWord* bottom) {
  6380   HeapWord* end = bottom + HeapRegion::GrainWords;
  6381   MemRegion mr(bottom, end);
  6382   assert(_g1_reserved.contains(mr), "invariant");
  6383   // This might return NULL if the allocation fails
  6384   return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
  6387 void G1CollectedHeap::verify_region_sets() {
  6388   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6390   // First, check the explicit lists.
  6391   _free_list.verify();
  6393     // Given that a concurrent operation might be adding regions to
  6394     // the secondary free list we have to take the lock before
  6395     // verifying it.
  6396     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6397     _secondary_free_list.verify();
  6399   _old_set.verify();
  6400   _humongous_set.verify();
  6402   // If a concurrent region freeing operation is in progress it will
  6403   // be difficult to correctly attribute any free regions we come
  6404   // across to the correct free list given that they might belong to
  6405   // one of several (free_list, secondary_free_list, any local lists,
  6406   // etc.). So, if that's the case we will skip the rest of the
  6407   // verification operation. Alternatively, waiting for the concurrent
  6408   // operation to complete will have a non-trivial effect on the GC's
  6409   // operation (no concurrent operation will last longer than the
  6410   // interval between two calls to verification) and it might hide
  6411   // any issues that we would like to catch during testing.
  6412   if (free_regions_coming()) {
  6413     return;
  6416   // Make sure we append the secondary_free_list on the free_list so
  6417   // that all free regions we will come across can be safely
  6418   // attributed to the free_list.
  6419   append_secondary_free_list_if_not_empty_with_lock();
  6421   // Finally, make sure that the region accounting in the lists is
  6422   // consistent with what we see in the heap.
  6423   _old_set.verify_start();
  6424   _humongous_set.verify_start();
  6425   _free_list.verify_start();
  6427   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
  6428   heap_region_iterate(&cl);
  6430   _old_set.verify_end();
  6431   _humongous_set.verify_end();
  6432   _free_list.verify_end();
