src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp


author      tschatzl
date        Mon, 24 Mar 2014 15:30:36 +0100
changeset   6404:96b1c2e06e25
parent      6402:191174b49bec
child       6405:a07bea31ef35
permissions -rw-r--r--

8027295: Free CSet takes ~50% of young pause time
Summary: Improve fast card cache iteration and avoid taking locks when freeing the collection set.
Reviewed-by: brutisso

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ticks.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define YOUNG_LIST_VERBOSE 0
// CURRENT STATUS
// This file is under construction.  Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock.  This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM.  (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)

// Notes on implementation of parallelism in different tasks.
//
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() ->
// SharedHeap::process_strong_roots() which calls eventually to
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
// SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
//

// Local to this file.

class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
    // This path is executed by the concurrent refine or mutator threads,
    // concurrently, and so we do not care if card_ptr contains references
    // that point into the collection set.
    assert(!oops_into_cset, "should be");

    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
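
// Clears each logged card it visits and records a histogram of the card
// values seen; used by check_ct_logs_at_safepoint() below to verify that
// the logged entries cover the dirty cards in the card table.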
class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
  {
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}

  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};
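
// Unconditionally redirties the card, with no is_in_reserved() filtering
// and no bookkeeping; intended for paths where every logged card is known
// to need redirtying, so the cheaper closure can be used.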
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};

YoungList::YoungList(G1CollectedHeap* g1h) :
    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
  guarantee(check_list_empty(false), "just making sure...");
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;
  ++_survivor_length;
}

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  uint length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (y: %d, surv: %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_survivor());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
                           length, _length);
  }

  return ret;
}

bool YoungList::check_list_empty(bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
                  _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  return ret;
}
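
// Remembered-set length sampling: rs_length_sampling_init() / _more() /
// _next() walk the young list, accumulating each region's rem_set()
// occupancy into _sampled_rs_lengths.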
void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr               = _head;
}

bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}

void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  size_t rs_length = _curr->rem_set()->occupied();

  _sampled_rs_lengths += rs_length;

  // The current region may not yet have been added to the
  // incremental collection set (it gets added when it is
  // retired as the current allocation region).
  if (_curr->in_collection_set()) {
    // Update the collection set policy information for this region
    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
  }

  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}

void
YoungList::reset_auxilary_lists() {
  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);

  int young_index_in_cset = 0;
  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);

    // The region is a non-empty survivor so let's add it to
    // the incremental collection set for the next evacuation
    // pause.
    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
    young_index_in_cset += 1;
  }
  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  _head   = _survivor_head;
  _length = _survivor_length;
  if (_survivor_head != NULL) {
    assert(_survivor_tail != NULL, "cause it shouldn't be");
    assert(_survivor_length > 0, "invariant");
    _survivor_tail->set_next_young_region(NULL);
  }

  // Don't clear the survivor list handles until the start of
  // the next evacuation pause - we need it in order to re-tag
  // the survivor regions from this evacuation pause as 'young'
  // at the start of the next.

  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}

void YoungList::print() {
  HeapRegion* lists[] = {_head,   _survivor_head};
  const char* names[] = {"YOUNG", "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
                             HR_FORMAT_PARAMS(curr),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->age_in_surv_rate_group_cond());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}

void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}

HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
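  // Lock-free pop: re-read the list head and retry until the cmpxchg that
  // installs the new head succeeds. A region that points to itself marks
  // the end of the list.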
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _cmThread->stop();
}

#ifdef ASSERT
// A region is added to the collection set as it is retired
// so an address p can point to a region which will be in the
// collection set but has not yet been retired.  This method
// therefore is only accurate during a GC pause after all
// regions have been retired.  It is used for debugging
// to check if an nmethod has references to objects that can
// be moved during a partial collection.  Though it can be
// inaccurate, it is sufficient for G1 because the conservative
// implementation of is_scavengable() for G1 will indicate that
// all nmethods must be scanned during a partial collection.
bool G1CollectedHeap::is_in_partial_collection(const void* p) {
  HeapRegion* hr = heap_region_containing(p);
  return hr != NULL && hr->in_collection_set();
}
#endif

// Returns true if the reference points to an object that
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();
  HeapRegion* hr = heap_region_containing(p);
  if (hr == NULL) {
     // null
     assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
     return false;
  } else {
    return !hr->isHumongous();
  }
}
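
// Debug check: counts the dirty cards in the card table, clears every
// logged card (recording a histogram), verifies the table is then clean,
// redirties the logged cards, and verifies the original count is restored.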
void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = g1_barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list() {
  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "secondary_free_list has %u entries",
                               _secondary_free_list.length());
      }
      // It looks as if there are free regions available on the
      // secondary_free_list. Let's move them to the free_list and try
      // again to allocate from it.
      append_secondary_free_list();

      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
             "empty we should have moved at least one entry to the free_list");
      HeapRegion* res = _free_list.remove_head();
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "allocated "HR_FORMAT" from secondary_free_list",
                               HR_FORMAT_PARAMS(res));
      }
      return res;
    }

    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved onto
    // the secondary_free_list.
    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (G1ConcRegionFreeingVerbose) {
    gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                           "could not allocate from secondary_free_list");
  }
  return NULL;
}

HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
  assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res;
  if (G1StressConcRegionFreeing) {
    if (!_secondary_free_list.is_empty()) {
      if (G1ConcRegionFreeingVerbose) {
        gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "forced to look at the secondary_free_list");
      }
      res = new_region_try_secondary_free_list();
      if (res != NULL) {
        return res;
      }
    }
  }
  res = _free_list.remove_head_or_null();
  if (res == NULL) {
    if (G1ConcRegionFreeingVerbose) {
      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                             "res == NULL, trying the secondary_free_list");
    }
    res = new_region_try_secondary_free_list();
  }
  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint. If this assumption changes we might have to
    // reconsider the use of _expand_heap_after_alloc_failure.
    assert(SafepointSynchronize::is_at_safepoint(), "invariant");

    ergo_verbose1(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("region allocation request failed")
                  ergo_format_byte("allocation request"),
                  word_size * HeapWordSize);
    if (expand(word_size * HeapWordSize)) {
      // Given that expand() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty. So
      // it would probably be OK to use remove_head(). But the extra
      // check for NULL is unlikely to be a performance issue here (we
      // just expanded the heap!) so let's just be conservative and
      // use remove_head_or_null().
      res = _free_list.remove_head_or_null();
    } else {
      _expand_heap_after_alloc_failure = false;
    }
  }
  return res;
}

uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
                                                        size_t word_size) {
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  uint first = G1_NULL_HRS_INDEX;
  if (num_regions == 1) {
    // Only one region to allocate, no need to go through the slower
    // path. The caller will attempt the expansion if this fails, so
    // let's not try to expand here too.
    HeapRegion* hr = new_region(word_size, false /* do_expand */);
    if (hr != NULL) {
      first = hr->hrs_index();
    } else {
      first = G1_NULL_HRS_INDEX;
    }
  } else {
    // We can't allocate humongous regions while cleanupComplete() is
    // running, since some of the regions we find to be empty might not
    // yet be added to the free list and it is not straightforward to
    // know which list they are on so that we can remove them. Note
    // that we only need to do this if we need to allocate more than
    // one region to satisfy the current humongous allocation
    // request. If we are only allocating one region we use the common
    // region allocation code (see above).
    wait_while_free_regions_coming();
    append_secondary_free_list_if_not_empty_with_lock();

    if (free_regions() >= num_regions) {
      first = _hrs.find_contiguous(num_regions);
      if (first != G1_NULL_HRS_INDEX) {
        for (uint i = first; i < first + num_regions; ++i) {
          HeapRegion* hr = region_at(i);
          assert(hr->is_empty(), "sanity");
          assert(is_on_master_free_list(hr), "sanity");
          hr->set_pending_removal(true);
        }
        _free_list.remove_all_pending(num_regions);
      }
    }
  }
  return first;
}

HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size) {
  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
  assert(isHumongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series + 1.
  uint last = first + num_regions;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The word size sum of all the regions we will allocate.
  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // This will be the "starts humongous" region.
  HeapRegion* first_hr = region_at(first);
  // The header of the new object will be placed at the bottom of
  // the first region.
  HeapWord* new_obj = first_hr->bottom();
  // This will be the new end of the first region in the series that
  // should also match the end of the last region in the series.
  HeapWord* new_end = new_obj + word_size_sum;
  // This will be the new top of the first region that will reflect
  // this allocation.
  HeapWord* new_top = new_obj + word_size;

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->set_startsHumongous(new_top, new_end);

  // Then, if there are any, we will set up the "continues
  // humongous" regions.
  HeapRegion* hr = NULL;
  for (uint i = first + 1; i < last; ++i) {
    hr = region_at(i);
    hr->set_continuesHumongous(first_hr);
  }
  // If we have "continues humongous" regions (hr != NULL), then the
  // end of the last one should match new_end.
  assert(hr == NULL || hr->end() == new_end, "sanity");

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now that the BOT and the object header have been initialized,
  // we can update top of the "starts humongous" region.
  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
         "new_top should be in this region");
  first_hr->set_top(new_top);
  if (_hr_printer.is_active()) {
    HeapWord* bottom = first_hr->bottom();
    HeapWord* end = first_hr->orig_end();
    if ((first + 1) == last) {
      // the series has a single humongous region
      _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
    } else {
      // the series has more than one humongous region
      _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
    }
  }

  // Now, we will update the top fields of the "continues humongous"
  // regions. The reason we need to do this is that, otherwise,
  // these regions would look empty and this will confuse parts of
  // G1. For example, the code that looks for a consecutive number
  // of empty regions will consider them empty and try to
  // re-allocate them. We can extend is_empty() to also include
  // !continuesHumongous(), but it is easier to just update the top
  // fields here. The way we set top for all regions (i.e., top ==
  // end for all regions but the last one, top == new_top for the
  // last one) is actually used when we will free up the humongous
  // region in free_humongous_region().
  hr = NULL;
  for (uint i = first + 1; i < last; ++i) {
    hr = region_at(i);
    if ((i + 1) == last) {
      // last continues humongous region
      assert(hr->bottom() < new_top && new_top <= hr->end(),
             "new_top should fall on this region");
      hr->set_top(new_top);
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
    } else {
      // not last one
      assert(new_top > hr->end(), "new_top should be above this region");
      hr->set_top(hr->end());
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
    }
  }
  // If we have continues humongous regions (hr != NULL), then the
  // end of the last one should match new_end and its top should
  // match new_top.
  assert(hr == NULL ||
         (hr->end() == new_end && hr->top() == new_top), "sanity");

  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
  _summary_bytes_used += first_hr->used();
  _humongous_set.add(first_hr);

  return new_obj;
}

// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
  uint x_num = expansion_regions();
  uint fs = _hrs.free_suffix();
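  // x_num: how many regions the heap could still expand by; fs: length of
  // the contiguous run of free regions at the end of the heap (the "free
  // suffix"), which a subsequent expansion would extend.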
  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
  if (first == G1_NULL_HRS_INDEX) {
    // The only thing we can do now is attempt expansion.
    if (fs + x_num >= num_regions) {
      // If the number of regions we're trying to allocate for this
      // object is at most the number of regions in the free suffix,
      // then the call to humongous_obj_allocate_find_first() above
      // should have succeeded and we wouldn't be here.
      //
      // We should only be trying to expand when the free suffix is
      // not sufficient for the object _and_ we have some expansion
      // room available.
      assert(num_regions > fs, "earlier allocation should have succeeded");

      ergo_verbose1(ErgoHeapSizing,
                    "attempt heap expansion",
                    ergo_format_reason("humongous allocation request failed")
                    ergo_format_byte("allocation request"),
                    word_size * HeapWordSize);
      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
        // Even though the heap was expanded, it might not have
        // reached the desired size. So, we cannot assume that the
        // allocation will succeed.
        first = humongous_obj_allocate_find_first(num_regions, word_size);
      }
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NULL_HRS_INDEX) {
    result =
      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    g1mm()->update_sizes();
  }

  verify_region_sets_optional();

  return result;
}

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "we do not allow humongous TLABs");

  unsigned int dummy_gc_count_before;
  int dummy_gclocker_retry_count = 0;
  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool*  gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    unsigned int gc_count_before;

    HeapWord* result = NULL;
    if (!isHumongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
    } else {
      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
    }
    if (result != NULL) {
      return result;
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(gc_count_before, word_size);
    // ...and get the VM thread to execute it.
    VMThread::execute(&op);

    if (op.prologue_succeeded() && op.pause_succeeded()) {
      // If the operation was successful we'll return the result even
      // if it is NULL. If the allocation attempt failed immediately
      // after a Full GC, it's unlikely we'll be able to allocate now.
      HeapWord* result = op.result();
      if (result != NULL && !isHumongous(word_size)) {
        // Allocations that take place on VM operations do not do any
        // card dirtying and we have to do it here. We only have to do
        // this for non-humongous allocations, though.
        dirty_young_block(result, word_size);
      }
      return result;
    } else {
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        return NULL;
      }
      assert(op.result() == NULL,
             "the result should be NULL if the VM op did not succeed");
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                           unsigned int *gc_count_before_ret,
                                           int* gclocker_retry_count_ret) {
  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    unsigned int gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
      if (result != NULL) {
        return result;
      }

      // If we reach here, attempt_allocation_locked() above failed to
      // allocate a new region. So the mutator alloc region should be NULL.
      assert(_mutator_alloc_region.get() == NULL, "only way to get here");

      if (GC_locker::is_active_and_needs_gc()) {
        if (g1_policy()->can_expand_young_list()) {
          // No need for an ergo verbose message here,
          // can_expand_young_list() does this when it returns true.
          result = _mutator_alloc_region.attempt_allocation_force(word_size,
                                                      false /* bot_updates */);
          if (result != NULL) {
            return result;
          }
        }
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GC_locker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
          GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GC_locker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_slow() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                          unsigned int * gc_count_before_ret,
                                          int* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(isHumongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
                                           word_size)) {
    collect(GCCause::_g1_humongous_allocation);
  }

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    unsigned int gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
      result = humongous_obj_allocate(word_size);
      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GC_locker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }

    if (should_try_gc) {
      // If we failed to allocate the humongous object, we should try to
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.

      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
          GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }

      if (succeeded) {
        // If we get here we successfully scheduled a collection which
        // failed to allocate. No point in trying to allocate
        // further. We'll just return NULL.
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
    } else {
      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GC_locker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space.  Give a
    // warning if we seem to be looping forever.

    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_humongous() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                       bool expect_null_mutator_alloc_region) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_mutator_alloc_region.get() == NULL ||
                                             !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!isHumongous(word_size)) {
    return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      g1_policy()->set_initiate_conc_mark_if_possible();
    }
    return result;
  }

  ShouldNotReachHere();
}
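
// Clears each region's remembered set and the card table entries covering
// the region after a full (mark-compact) collection; applied to every
// region by clear_rsets_post_compaction() below.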
class PostMCRemSetClearClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
    _g1h(g1h), _mr_bs(mr_bs) {}

  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();

    if (r->continuesHumongous()) {
      // We'll assert that the strong code root list and RSet are empty
      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
      assert(hrrs->occupied() == 0, "RSet should be empty");
      return false;
    }

    _g1h->reset_gc_time_stamps(r);
    hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));

    return false;
  }
};

void G1CollectedHeap::clear_rsets_post_compaction() {
  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
  heap_region_iterate(&rs_clear);
}
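
// Rebuilds a region's remembered-set entries by applying UpdateRSOopClosure
// to every object in the region.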
class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap*   _g1h;
  UpdateRSOopClosure _cl;
  int                _worker_i;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};
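
// Parallel task that rebuilds remembered sets: each worker claims regions
// via heap_region_par_iterate_chunked() and runs the closure above on them.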
class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(uint worker_id) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
                                         _g1->workers()->active_workers(),
                                         HeapRegion::RebuildRSClaimValue);
  }
};
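
// Reports each non-empty region to the heap region printer after a full
// compaction, classifying it as old or single/starts/continues humongous.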
class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  bool doHeapRegion(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    // We only generate output for non-empty regions.
    if (!hr->is_empty()) {
      if (!hr->isHumongous()) {
        _hr_printer->post_compaction(hr, G1HRPrinter::Old);
      } else if (hr->startsHumongous()) {
        if (hr->region_num() == 1) {
          // single humongous region
          _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
        } else {
          _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
        }
      } else {
        assert(hr->continuesHumongous(), "only way to get here");
        _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
      }
    }
    return false;
  }

  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
};

void G1CollectedHeap::print_hrs_post_compaction() {
  PostCompactionPrinterClosure cl(hr_printer());
  heap_region_iterate(&cl);
}

bool G1CollectedHeap::do_collection(bool explicit_gc,
                                    bool clear_all_soft_refs,
                                    size_t word_size) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());

  SvcGCMarker sgcm(SvcGCMarker::FULL);
  ResourceMark rm;

  print_heap_before_gc();
  trace_heap_before_gc(gc_tracer);

  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  verify_region_sets_optional();

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                           collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  {
    IsGCActiveMark x;

    // Timing
    assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

    {
      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
      TraceCollectorStats tcs(g1mm()->full_collection_counters());
      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

      double start = os::elapsedTime();
      g1_policy()->record_full_collection_start();

      // Note: When we have a more flexible GC logging framework that
      // allows us to add optional attributes to a GC log record we
      // could consider timing and reporting how long we wait in the
      // following two methods.
      wait_while_free_regions_coming();
      // If we start the compaction before the CM threads finish
      // scanning the root regions we might trip them over as we'll
      // be moving objects / updating references. So let's wait until
      // they are done. By telling them to abort, they should complete
      // early.
      _cm->root_regions()->abort();
      _cm->root_regions()->wait_until_scan_finished();
      append_secondary_free_list_if_not_empty_with_lock();

      gc_prologue(true);
      increment_total_collections(true /* full gc */);
      increment_old_marking_cycles_started();

      assert(used() == recalculate_used(), "Should be equal");

      verify_before_gc();

      pre_full_gc_dump(gc_timer);

      COMPILER2_PRESENT(DerivedPointerTable::clear());

      // Disable discovery and empty the discovered lists
      // for the CM ref processor.
      ref_processor_cm()->disable_discovery();
      ref_processor_cm()->abandon_partial_discovery();
      ref_processor_cm()->verify_no_references_recorded();

      // Abandon current iterations of concurrent marking and concurrent
      // refinement, if any are in progress. We have to do this before
      // wait_until_scan_finished() below.
      concurrent_mark()->abort();

      // Make sure we'll choose a new allocation region afterwards.
      release_mutator_alloc_region();
      abandon_gc_alloc_regions();
      g1_rem_set()->cleanupHRRS();

      // We should call this after we retire any currently active alloc
      // regions so that all the ALLOC / RETIRE events are generated
      // before the start GC event.
      _hr_printer.start_gc(true /* full */, (size_t) total_collections());

      // We may have added regions to the current incremental collection
      // set between the last GC or pause and now. We need to clear the
      // incremental collection set and then start rebuilding it afresh
      // after this full GC.
      abandon_collection_set(g1_policy()->inc_cset_head());
      g1_policy()->clear_incremental_cset();
      g1_policy()->stop_incremental_cset_building();

      tear_down_region_sets(false /* free_list_only */);
      g1_policy()->set_gcs_are_young(true);

      // See the comments in g1CollectedHeap.hpp and
      // G1CollectedHeap::ref_processing_init() about
      // how reference processing currently works in G1.

      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);

      // Temporarily clear the STW ref processor's _is_alive_non_header field.
      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
  1392       ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  1393       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
  1395       // Do collection work
  1396       {
  1397         HandleMark hm;  // Discard invalid handles created during gc
  1398         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
  1399       }
  1401       assert(free_regions() == 0, "we should not have added any free regions");
  1402       rebuild_region_sets(false /* free_list_only */);
  1404       // Enqueue any discovered reference objects that have
  1405       // not been removed from the discovered lists.
  1406       ref_processor_stw()->enqueue_discovered_references();
  1408       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1410       MemoryService::track_memory_usage();
  1412       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  1413       ref_processor_stw()->verify_no_references_recorded();
  1415       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  1416       ClassLoaderDataGraph::purge();
  1417       MetaspaceAux::verify_metrics();
  1419       // Note: since we've just done a full GC, concurrent
  1420       // marking is no longer active. Therefore we need not
  1421       // re-enable reference discovery for the CM ref processor.
  1422       // That will be done at the start of the next marking cycle.
  1423       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  1424       ref_processor_cm()->verify_no_references_recorded();
  1426       reset_gc_time_stamp();
  1427       // Since everything potentially moved, we will clear all remembered
  1428       // sets, and clear all cards.  Later we will rebuild remembered
  1429       // sets. We will also reset the GC time stamps of the regions.
  1430       clear_rsets_post_compaction();
  1431       check_gc_time_stamps();
  1433       // Resize the heap if necessary.
  1434       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1436       if (_hr_printer.is_active()) {
  1437         // We should do this after we potentially resize the heap so
  1438         // that all the COMMIT / UNCOMMIT events are generated before
  1439         // the end GC event.
  1441         print_hrs_post_compaction();
  1442         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  1443       }
  1445       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  1446       if (hot_card_cache->use_cache()) {
  1447         hot_card_cache->reset_card_counts();
  1448         hot_card_cache->reset_hot_cache();
  1449       }
  1451       // Rebuild remembered sets of all regions.
  1452       if (G1CollectedHeap::use_parallel_gc_threads()) {
  1453         uint n_workers =
  1454           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  1455                                                   workers()->active_workers(),
  1456                                                   Threads::number_of_non_daemon_threads());
  1457         assert(UseDynamicNumberOfGCThreads ||
  1458                n_workers == workers()->total_workers(),
  1459                "If not dynamic should be using all the workers");
  1460         workers()->set_active_workers(n_workers);
  1461         // Set parallel threads in the heap (_n_par_threads) only
  1462         // before a parallel phase and always reset it to 0 after
  1463         // the phase so that the number of parallel threads does
  1464         // not get carried forward to a serial phase where there
  1465         // may be code that is "possibly_parallel".
  1466         set_par_threads(n_workers);
  1468         ParRebuildRSTask rebuild_rs_task(this);
  1469         assert(check_heap_region_claim_values(
  1470                HeapRegion::InitialClaimValue), "sanity check");
  1471         assert(UseDynamicNumberOfGCThreads ||
  1472                workers()->active_workers() == workers()->total_workers(),
  1473                "Unless dynamic should use total workers");
  1474         // Use the most recent number of active workers
  1475         assert(workers()->active_workers() > 0,
  1476                "Active workers not properly set");
  1477         set_par_threads(workers()->active_workers());
  1478         workers()->run_task(&rebuild_rs_task);
  1479         set_par_threads(0);
  1480         assert(check_heap_region_claim_values(
  1481                HeapRegion::RebuildRSClaimValue), "sanity check");
  1482         reset_heap_region_claim_values();
  1483       } else {
  1484         RebuildRSOutOfRegionClosure rebuild_rs(this);
  1485         heap_region_iterate(&rebuild_rs);
  1486       }
  1488       // Rebuild the strong code root lists for each region
  1489       rebuild_strong_code_roots();
  1491       if (true) { // FIXME
  1492         MetaspaceGC::compute_new_size();
  1493       }
  1495 #ifdef TRACESPINNING
  1496       ParallelTaskTerminator::print_termination_counts();
  1497 #endif
  1499       // Discard all rset updates
  1500       JavaThread::dirty_card_queue_set().abandon_logs();
  1501       assert(!G1DeferredRSUpdate
  1502              || (G1DeferredRSUpdate &&
  1503                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1505       _young_list->reset_sampled_info();
  1506       // At this point there should be no regions in the
  1507       // entire heap tagged as young.
  1508       assert(check_young_list_empty(true /* check_heap */),
  1509              "young list should be empty at this point");
  1511       // Update the number of full collections that have been completed.
  1512       increment_old_marking_cycles_completed(false /* concurrent */);
  1514       _hrs.verify_optional();
  1515       verify_region_sets_optional();
  1517       verify_after_gc();
  1519       // Start a new incremental collection set for the next pause
  1520       assert(g1_policy()->collection_set() == NULL, "must be");
  1521       g1_policy()->start_incremental_cset_building();
  1523       // Clear the _cset_fast_test bitmap in anticipation of adding
  1524       // regions to the incremental collection set for the next
  1525       // evacuation pause.
  1526       clear_cset_fast_test();
  1528       init_mutator_alloc_region();
  1530       double end = os::elapsedTime();
  1531       g1_policy()->record_full_collection_end();
  1533       if (G1Log::fine()) {
  1534         g1_policy()->print_heap_transition();
  1535       }
  1537       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  1538       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  1539       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  1540       // before any GC notifications are raised.
  1541       g1mm()->update_sizes();
  1543       gc_epilogue(true);
  1544     }
  1546     if (G1Log::finer()) {
  1547       g1_policy()->print_detailed_heap_transition(true /* full */);
  1548     }
  1550     print_heap_after_gc();
  1551     trace_heap_after_gc(gc_tracer);
  1553     post_full_gc_dump(gc_timer);
  1555     gc_timer->register_gc_end();
  1556     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  1557   }
  1559   return true;
  1560 }
  1562 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1563   // do_collection() will return whether it succeeded in performing
  1564   // the GC. Currently, there is no facility on the
  1565   // do_full_collection() API to notify the caller that the collection
  1566   // did not succeed (e.g., because it was locked out by the GC
  1567   // locker). So, right now, we'll ignore the return value.
  1568   bool dummy = do_collection(true,                /* explicit_gc */
  1569                              clear_all_soft_refs,
  1570                              0                    /* word_size */);
  1571 }
  1573 // This code is mostly copied from TenuredGeneration.
  1574 void
  1575 G1CollectedHeap::
  1576 resize_if_necessary_after_full_collection(size_t word_size) {
  1577   // Include the current allocation, if any, and bytes that will be
  1578   // pre-allocated to support collections, as "used".
  1579   const size_t used_after_gc = used();
  1580   const size_t capacity_after_gc = capacity();
  1581   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1583   // This is enforced in arguments.cpp.
  1584   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1585          "otherwise the code below doesn't make sense");
  1587   // We don't have floating point command-line arguments
  1588   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1589   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1590   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1591   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1593   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1594   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1596   // We have to be careful here as these two calculations can overflow
  1597   // 32-bit size_t's.
  1598   double used_after_gc_d = (double) used_after_gc;
  1599   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1600   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
  1602   // Let's make sure that they are both under the max heap size, which
  1603   // by default will make them fit into a size_t.
  1604   double desired_capacity_upper_bound = (double) max_heap_size;
  1605   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1606                                     desired_capacity_upper_bound);
  1607   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1608                                     desired_capacity_upper_bound);
  1610   // We can now safely turn them into size_t's.
  1611   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1612   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
  1614   // This assert only makes sense here, before we adjust them
  1615   // with respect to the min and max heap size.
  1616   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1617          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1618                  "maximum_desired_capacity = "SIZE_FORMAT,
  1619                  minimum_desired_capacity, maximum_desired_capacity));
  1621   // Should not be greater than the heap max size. No need to adjust
  1622   // it with respect to the heap min size as it's a lower bound (i.e.,
  1623   // we'll try to make the capacity larger than it, not smaller).
  1624   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1625   // Should not be less than the heap min size. No need to adjust it
  1626   // with respect to the heap max size as it's an upper bound (i.e.,
  1627   // we'll try to make the capacity smaller than it, not greater).
  1628   maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
  1630   if (capacity_after_gc < minimum_desired_capacity) {
  1631     // Don't expand unless it's significant
  1632     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1633     ergo_verbose4(ErgoHeapSizing,
  1634                   "attempt heap expansion",
  1635                   ergo_format_reason("capacity lower than "
  1636                                      "min desired capacity after Full GC")
  1637                   ergo_format_byte("capacity")
  1638                   ergo_format_byte("occupancy")
  1639                   ergo_format_byte_perc("min desired capacity"),
  1640                   capacity_after_gc, used_after_gc,
  1641                   minimum_desired_capacity, (double) MinHeapFreeRatio);
  1642     expand(expand_bytes);
  1644     // No expansion, now see if we want to shrink
  1645   } else if (capacity_after_gc > maximum_desired_capacity) {
  1646     // Capacity too large, compute shrinking size
  1647     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1648     ergo_verbose4(ErgoHeapSizing,
  1649                   "attempt heap shrinking",
  1650                   ergo_format_reason("capacity higher than "
  1651                                      "max desired capacity after Full GC")
  1652                   ergo_format_byte("capacity")
  1653                   ergo_format_byte("occupancy")
  1654                   ergo_format_byte_perc("max desired capacity"),
  1655                   capacity_after_gc, used_after_gc,
  1656                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
  1657     shrink(shrink_bytes);
  1658   }
  1659 }
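// Worked example (illustrative, with assumed flag values): with
// MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70, used_after_gc = 600M
// gives
//   minimum_desired_capacity = 600M / (1 - 0.40) = 1000M
//   maximum_desired_capacity = 600M / (1 - 0.70) = 2000M
// so a post-GC capacity below 1000M triggers an expansion attempt, a
// capacity above 2000M triggers a shrink attempt, and anything in between
// leaves the heap size untouched.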
  1662 HeapWord*
  1663 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1664                                            bool* succeeded) {
  1665   assert_at_safepoint(true /* should_be_vm_thread */);
  1667   *succeeded = true;
  1668   // Let's attempt the allocation first.
  1669   HeapWord* result =
  1670     attempt_allocation_at_safepoint(word_size,
  1671                                  false /* expect_null_mutator_alloc_region */);
  1672   if (result != NULL) {
  1673     assert(*succeeded, "sanity");
  1674     return result;
  1675   }
  1677   // In a G1 heap, we're supposed to keep allocation from failing by
  1678   // incremental pauses.  Therefore, at least for now, we'll favor
  1679   // expansion over collection.  (This might change in the future if we can
  1680   // do something smarter than full collection to satisfy a failed alloc.)
  1681   result = expand_and_allocate(word_size);
  1682   if (result != NULL) {
  1683     assert(*succeeded, "sanity");
  1684     return result;
  1685   }
  1687   // Expansion didn't work, we'll try to do a Full GC.
  1688   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1689                                     false, /* clear_all_soft_refs */
  1690                                     word_size);
  1691   if (!gc_succeeded) {
  1692     *succeeded = false;
  1693     return NULL;
  1694   }
  1696   // Retry the allocation
  1697   result = attempt_allocation_at_safepoint(word_size,
  1698                                   true /* expect_null_mutator_alloc_region */);
  1699   if (result != NULL) {
  1700     assert(*succeeded, "sanity");
  1701     return result;
  1702   }
  1704   // Then, try a Full GC that will collect all soft references.
  1705   gc_succeeded = do_collection(false, /* explicit_gc */
  1706                                true,  /* clear_all_soft_refs */
  1707                                word_size);
  1708   if (!gc_succeeded) {
  1709     *succeeded = false;
  1710     return NULL;
  1711   }
  1713   // Retry the allocation once more
  1714   result = attempt_allocation_at_safepoint(word_size,
  1715                                   true /* expect_null_mutator_alloc_region */);
  1716   if (result != NULL) {
  1717     assert(*succeeded, "sanity");
  1718     return result;
  1719   }
  1721   assert(!collector_policy()->should_clear_all_soft_refs(),
  1722          "Flag should have been handled and cleared prior to this point");
  1724   // What else?  We might try synchronous finalization later.  If the total
  1725   // space available is large enough for the allocation, then a more
  1726   // complete compaction phase than we've tried so far might be
  1727   // appropriate.
  1728   assert(*succeeded, "sanity");
  1729   return NULL;
  1730 }
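// Illustrative sketch, not part of this file: a caller of
// satisfy_failed_allocation() must distinguish "out of memory" from "GC was
// locked out" via the out parameter (hypothetical usage):
//
//   bool succeeded;
//   HeapWord* result = g1h->satisfy_failed_allocation(word_size, &succeeded);
//   if (result != NULL) {
//     // allocation satisfied, possibly after one or two Full GCs
//   } else if (!succeeded) {
//     // GC_locker prevented the Full GC; the caller should retry later
//   } else {
//     // both Full GCs ran (the second clearing soft refs) and the
//     // request still cannot be satisfied
//   }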
  1732 // Attempts to expand the heap sufficiently to support an allocation
  1733 // of the given "word_size".  If successful, performs the allocation
  1734 // and returns the address of the allocated block; otherwise returns
  1735 // "NULL".
  1737 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1738   assert_at_safepoint(true /* should_be_vm_thread */);
  1740   verify_region_sets_optional();
  1742   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
  1743   ergo_verbose1(ErgoHeapSizing,
  1744                 "attempt heap expansion",
  1745                 ergo_format_reason("allocation request failed")
  1746                 ergo_format_byte("allocation request"),
  1747                 word_size * HeapWordSize);
  1748   if (expand(expand_bytes)) {
  1749     _hrs.verify_optional();
  1750     verify_region_sets_optional();
  1751     return attempt_allocation_at_safepoint(word_size,
  1752                                  false /* expect_null_mutator_alloc_region */);
  1753   }
  1754   return NULL;
  1755 }
  1757 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
  1758                                              HeapWord* new_end) {
  1759   assert(old_end != new_end, "don't call this otherwise");
  1760   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
  1762   // Update the committed mem region.
  1763   _g1_committed.set_end(new_end);
  1764   // Tell the card table about the update.
  1765   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1766   // Tell the BOT about the update.
  1767   _bot_shared->resize(_g1_committed.word_size());
  1768   // Tell the hot card cache about the update
  1769   _cg1r->hot_card_cache()->resize_card_counts(capacity());
  1770 }
  1772 bool G1CollectedHeap::expand(size_t expand_bytes) {
  1773   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  1774   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1775                                        HeapRegion::GrainBytes);
  1776   ergo_verbose2(ErgoHeapSizing,
  1777                 "expand the heap",
  1778                 ergo_format_byte("requested expansion amount")
  1779                 ergo_format_byte("attempted expansion amount"),
  1780                 expand_bytes, aligned_expand_bytes);
  1782   if (_g1_storage.uncommitted_size() == 0) {
  1783     ergo_verbose0(ErgoHeapSizing,
  1784                       "did not expand the heap",
  1785                       ergo_format_reason("heap already fully expanded"));
  1786     return false;
  1787   }
  1789   // First commit the memory.
  1790   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1791   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  1792   if (successful) {
  1793     // Then propagate this update to the necessary data structures.
  1794     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1795     update_committed_space(old_end, new_end);
  1797     FreeRegionList expansion_list("Local Expansion List");
  1798     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
  1799     assert(mr.start() == old_end, "post-condition");
  1800     // mr might be a smaller region than what was requested if
  1801     // expand_by() was unable to allocate the HeapRegion instances
  1802     assert(mr.end() <= new_end, "post-condition");
  1804     size_t actual_expand_bytes = mr.byte_size();
  1805     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
  1806     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
  1807            "post-condition");
  1808     if (actual_expand_bytes < aligned_expand_bytes) {
  1809       // We could not expand _hrs to the desired size. In this case we
  1810       // need to shrink the committed space accordingly.
  1811       assert(mr.end() < new_end, "invariant");
  1813       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
  1814       // First uncommit the memory.
  1815       _g1_storage.shrink_by(diff_bytes);
  1816       // Then propagate this update to the necessary data structures.
  1817       update_committed_space(new_end, mr.end());
  1818     }
  1819     _free_list.add_as_tail(&expansion_list);
  1821     if (_hr_printer.is_active()) {
  1822       HeapWord* curr = mr.start();
  1823       while (curr < mr.end()) {
  1824         HeapWord* curr_end = curr + HeapRegion::GrainWords;
  1825         _hr_printer.commit(curr, curr_end);
  1826         curr = curr_end;
  1827       }
  1828       assert(curr == mr.end(), "post-condition");
  1829     }
  1830     g1_policy()->record_new_heap_size(n_regions());
  1831   } else {
  1832     ergo_verbose0(ErgoHeapSizing,
  1833                   "did not expand the heap",
  1834                   ergo_format_reason("heap expansion operation failed"));
  1835     // The expansion of the virtual storage space was unsuccessful.
  1836     // Let's see if it was because we ran out of swap.
  1837     if (G1ExitOnExpansionFailure &&
  1838         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
  1839       // We had head room...
  1840       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
  1841     }
  1842   }
  1843   return successful;
  1844 }
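// Worked example (illustrative, with assumed sizes): with a 4K page size
// and HeapRegion::GrainBytes = 1M, a request of expand_bytes = 700000 is
// first page-aligned up to 700416 and then region-aligned up to 1048576,
// i.e. the heap always grows by whole regions, never by a fraction of one.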
  1846 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  1847   size_t aligned_shrink_bytes =
  1848     ReservedSpace::page_align_size_down(shrink_bytes);
  1849   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1850                                          HeapRegion::GrainBytes);
  1851   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  1853   uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
  1854   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1855   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
  1857   ergo_verbose3(ErgoHeapSizing,
  1858                 "shrink the heap",
  1859                 ergo_format_byte("requested shrinking amount")
  1860                 ergo_format_byte("aligned shrinking amount")
  1861                 ergo_format_byte("attempted shrinking amount"),
  1862                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  1863   if (num_regions_removed > 0) {
  1864     _g1_storage.shrink_by(shrunk_bytes);
  1865     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1867     if (_hr_printer.is_active()) {
  1868       HeapWord* curr = old_end;
  1869       while (curr > new_end) {
  1870         HeapWord* curr_end = curr;
  1871         curr -= HeapRegion::GrainWords;
  1872         _hr_printer.uncommit(curr, curr_end);
  1873       }
  1874     }
  1876     _expansion_regions += num_regions_removed;
  1877     update_committed_space(old_end, new_end);
  1878     HeapRegionRemSet::shrink_heap(n_regions());
  1879     g1_policy()->record_new_heap_size(n_regions());
  1880   } else {
  1881     ergo_verbose0(ErgoHeapSizing,
  1882                   "did not shrink the heap",
  1883                   ergo_format_reason("heap shrinking operation failed"));
  1884   }
  1885 }
  1887 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1888   verify_region_sets_optional();
  1890   // We should only reach here at the end of a Full GC which means we
  1891   // should not be holding on to any GC alloc regions. The method
  1892   // below will make sure of that and do any remaining clean up.
  1893   abandon_gc_alloc_regions();
  1895   // Instead of tearing down / rebuilding the free lists here, we
  1896   // could instead use the remove_all_pending() method on free_list to
  1897   // remove only the ones that we need to remove.
  1898   tear_down_region_sets(true /* free_list_only */);
  1899   shrink_helper(shrink_bytes);
  1900   rebuild_region_sets(true /* free_list_only */);
  1902   _hrs.verify_optional();
  1903   verify_region_sets_optional();
  1904 }
  1906 // Public methods.
  1908 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1909 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1910 #endif // _MSC_VER
  1913 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1914   SharedHeap(policy_),
  1915   _g1_policy(policy_),
  1916   _dirty_card_queue_set(false),
  1917   _into_cset_dirty_card_queue_set(false),
  1918   _is_alive_closure_cm(this),
  1919   _is_alive_closure_stw(this),
  1920   _ref_processor_cm(NULL),
  1921   _ref_processor_stw(NULL),
  1922   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1923   _bot_shared(NULL),
  1924   _evac_failure_scan_stack(NULL),
  1925   _mark_in_progress(false),
  1926   _cg1r(NULL), _summary_bytes_used(0),
  1927   _g1mm(NULL),
  1928   _refine_cte_cl(NULL),
  1929   _full_collection(false),
  1930   _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
  1931   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  1932   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  1933   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  1934   _free_regions_coming(false),
  1935   _young_list(new YoungList(this)),
  1936   _gc_time_stamp(0),
  1937   _retained_old_gc_alloc_region(NULL),
  1938   _survivor_plab_stats(YoungPLABSize, PLABWeight),
  1939   _old_plab_stats(OldPLABSize, PLABWeight),
  1940   _expand_heap_after_alloc_failure(true),
  1941   _surviving_young_words(NULL),
  1942   _old_marking_cycles_started(0),
  1943   _old_marking_cycles_completed(0),
  1944   _concurrent_cycle_started(false),
  1945   _in_cset_fast_test(NULL),
  1946   _in_cset_fast_test_base(NULL),
  1947   _dirty_cards_region_list(NULL),
  1948   _worker_cset_start_region(NULL),
  1949   _worker_cset_start_region_time_stamp(NULL),
  1950   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  1951   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  1952   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  1953   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
  1955   _g1h = this;
  1956   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1957     vm_exit_during_initialization("Failed necessary allocation.");
  1958   }
  1960   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
  1962   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1963   _task_queues = new RefToScanQueueSet(n_queues);
  1965   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1966   assert(n_rem_sets > 0, "Invariant.");
  1968   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  1969   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
  1970   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
  1972   for (int i = 0; i < n_queues; i++) {
  1973     RefToScanQueue* q = new RefToScanQueue();
  1974     q->initialize();
  1975     _task_queues->register_queue(i, q);
  1976     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
  1977   }
  1978   clear_cset_start_regions();
  1980   // Initialize the G1EvacuationFailureALot counters and flags.
  1981   NOT_PRODUCT(reset_evacuation_should_fail();)
  1983   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1984 }
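// Illustrative sketch, not part of this file: the threshold set in the
// constructor above means an object is humongous once it is about half a
// region in size. A hypothetical predicate (the real one lives in the
// header and may differ in its boundary case) would be:
//
//   static bool is_humongous(size_t word_size) {
//     return word_size >= _humongous_object_threshold_in_words; // GrainWords / 2
//   }
//
// With 1M regions (131072 words on a 64-bit VM) that puts the cut-off at
// roughly 65536 words, i.e. 512K.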
  1986 jint G1CollectedHeap::initialize() {
  1987   CollectedHeap::pre_initialize();
  1988   os::enable_vtime();
  1990   G1Log::init();
  1992   // Necessary to satisfy locking discipline assertions.
  1994   MutexLocker x(Heap_lock);
  1996   // We have to initialize the printer before committing the heap, as
  1997   // it will be used then.
  1998   _hr_printer.set_active(G1PrintHeapRegions);
  2000   // While there are no constraints in the GC code that HeapWordSize
  2001   // be any particular value, there are multiple other areas in the
  2002   // system which believe this to be true (e.g. oop->object_size in some
  2003   // cases incorrectly returns the size in wordSize units rather than
  2004   // HeapWordSize).
  2005   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  2007   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  2008   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  2009   size_t heap_alignment = collector_policy()->heap_alignment();
  2011   // Ensure that the sizes are properly aligned.
  2012   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  2013   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  2014   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
  2016   _cg1r = new ConcurrentG1Refine(this);
  2018   // Reserve the maximum.
  2020   // When compressed oops are enabled, the preferred heap base
  2021   // is calculated by subtracting the requested size from the
  2022   // 32Gb boundary and using the result as the base address for
  2023   // heap reservation. If the requested size is not aligned to
  2024   // HeapRegion::GrainBytes (i.e. the alignment that is passed
  2025   // into the ReservedHeapSpace constructor) then the actual
  2026   // base of the reserved heap may end up differing from the
  2027   // address that was requested (i.e. the preferred heap base).
  2028   // If this happens then we could end up using a non-optimal
  2029   // compressed oops mode.
  2031   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
  2032                                                  heap_alignment);
  2034   // It is important to do this in a way such that concurrent readers can't
  2035   // temporarily think something is in the heap.  (I've actually seen this
  2036   // happen in asserts: DLD.)
  2037   _reserved.set_word_size(0);
  2038   _reserved.set_start((HeapWord*)heap_rs.base());
  2039   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  2041   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
  2043   // Create the gen rem set (and barrier set) for the entire reserved region.
  2044   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  2045   set_barrier_set(rem_set()->bs());
  2046   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
  2047     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
  2048     return JNI_ENOMEM;
  2049   }
  2051   // Also create a G1 rem set.
  2052   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
  2054   // Carve out the G1 part of the heap.
  2056   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  2057   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  2058                            g1_rs.size()/HeapWordSize);
  2060   _g1_storage.initialize(g1_rs, 0);
  2061   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  2062   _hrs.initialize((HeapWord*) _g1_reserved.start(),
  2063                   (HeapWord*) _g1_reserved.end());
  2064   assert(_hrs.max_length() == _expansion_regions,
  2065          err_msg("max length: %u expansion regions: %u",
  2066                  _hrs.max_length(), _expansion_regions));
  2068   // Do later initialization work for concurrent refinement.
  2069   _cg1r->init();
  2071   // 6843694 - ensure that the maximum region index can fit
  2072   // in the remembered set structures.
  2073   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  2074   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  2076   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  2077   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  2078   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
  2079             "too many cards per region");
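  // Worked arithmetic (illustrative, assuming 16-bit index types): if
  // RegionIdx_t is 16 bits wide, max_region_idx above evaluates to
  // (1 << 15) - 1 = 32767, and a 16-bit CardIdx_t likewise bounds
  // max_cards_per_region at 32767; the guarantees only check that the
  // region count and cards-per-region fit into those fields.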
  2081   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
  2083   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  2084                                              heap_word_size(init_byte_size));
  2086   _g1h = this;
  2088   _in_cset_fast_test_length = max_regions();
  2089   _in_cset_fast_test_base =
  2090                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
  2092   // We're biasing _in_cset_fast_test to avoid subtracting the
  2093   // beginning of the heap every time we want to index; basically
  2094   // it's the same as what we do with the card table.
  2095   _in_cset_fast_test = _in_cset_fast_test_base -
  2096                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  2098   // Clear the _cset_fast_test bitmap in anticipation of adding
  2099   // regions to the incremental collection set for the first
  2100   // evacuation pause.
  2101   clear_cset_fast_test();
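  // Illustrative sketch, not part of this file: thanks to the bias above, a
  // membership test is a single shift and load (hypothetical helper):
  //
  //   bool in_cset_fast_test(HeapWord* addr) {
  //     return _in_cset_fast_test[(uintx) addr >> HeapRegion::LogOfHRGrainBytes];
  //   }
  //
  // because the subtraction of the heap base has been folded into the
  // array pointer itself.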
  2103   // Create the ConcurrentMark data structure and thread.
  2104   // (Must do this late, so that "max_regions" is defined.)
  2105   _cm = new ConcurrentMark(this, heap_rs);
  2106   if (_cm == NULL || !_cm->completed_initialization()) {
  2107     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
  2108     return JNI_ENOMEM;
  2109   }
  2110   _cmThread = _cm->cmThread();
  2112   // Initialize the from_card cache structure of HeapRegionRemSet.
  2113   HeapRegionRemSet::init_heap(max_regions());
  2115   // Now expand into the initial heap size.
  2116   if (!expand(init_byte_size)) {
  2117     vm_shutdown_during_initialization("Failed to allocate initial heap.");
  2118     return JNI_ENOMEM;
  2119   }
  2121   // Perform any initialization actions delegated to the policy.
  2122   g1_policy()->init();
  2124   _refine_cte_cl =
  2125     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  2126                                     g1_rem_set(),
  2127                                     concurrent_g1_refine());
  2128   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  2130   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  2131                                                SATB_Q_FL_lock,
  2132                                                G1SATBProcessCompletedThreshold,
  2133                                                Shared_SATB_Q_lock);
  2135   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2136                                                 DirtyCardQ_FL_lock,
  2137                                                 concurrent_g1_refine()->yellow_zone(),
  2138                                                 concurrent_g1_refine()->red_zone(),
  2139                                                 Shared_DirtyCardQ_lock);
  2141   if (G1DeferredRSUpdate) {
  2142     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2143                                       DirtyCardQ_FL_lock,
  2144                                       -1, // never trigger processing
  2145                                       -1, // no limit on length
  2146                                       Shared_DirtyCardQ_lock,
  2147                                       &JavaThread::dirty_card_queue_set());
  2148   }
  2150   // Initialize the card queue set used to hold cards containing
  2151   // references into the collection set.
  2152   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
  2153                                              DirtyCardQ_FL_lock,
  2154                                              -1, // never trigger processing
  2155                                              -1, // no limit on length
  2156                                              Shared_DirtyCardQ_lock,
  2157                                              &JavaThread::dirty_card_queue_set());
  2159   // In case we're keeping closure specialization stats, initialize those
  2160   // counts and that mechanism.
  2161   SpecializationStats::clear();
  2163   // Here we allocate the dummy full region that is required by the
  2164   // G1AllocRegion class. If we don't pass an address in the reserved
  2165   // space here, lots of asserts fire.
  2167   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
  2168                                              _g1_reserved.start());
  2169   // We'll re-use the same region whether the alloc region will
  2170   // require BOT updates or not and, if it doesn't, then a non-young
  2171   // region will complain that it cannot support allocations without
  2172   // BOT updates. So we'll tag the dummy region as young to avoid that.
  2173   dummy_region->set_young();
  2174   // Make sure it's full.
  2175   dummy_region->set_top(dummy_region->end());
  2176   G1AllocRegion::setup(this, dummy_region);
  2178   init_mutator_alloc_region();
  2180   // Create the monitoring and management support now so that
  2181   // values in the heap have been properly initialized.
  2182   _g1mm = new G1MonitoringSupport(this);
  2184   return JNI_OK;
  2185 }
  2187 size_t G1CollectedHeap::conservative_max_heap_alignment() {
  2188   return HeapRegion::max_region_size();
  2189 }
  2191 void G1CollectedHeap::ref_processing_init() {
  2192   // Reference processing in G1 currently works as follows:
  2193   //
  2194   // * There are two reference processor instances. One is
  2195   //   used to record and process discovered references
  2196   //   during concurrent marking; the other is used to
  2197   //   record and process references during STW pauses
  2198   //   (both full and incremental).
  2199   // * Both ref processors need to 'span' the entire heap as
  2200   //   the regions in the collection set may be dotted around.
  2201   //
  2202   // * For the concurrent marking ref processor:
  2203   //   * Reference discovery is enabled at initial marking.
  2204   //   * Reference discovery is disabled and the discovered
  2205   //     references processed etc during remarking.
  2206   //   * Reference discovery is MT (see below).
  2207   //   * Reference discovery requires a barrier (see below).
  2208   //   * Reference processing may or may not be MT
  2209   //     (depending on the value of ParallelRefProcEnabled
  2210   //     and ParallelGCThreads).
  2211   //   * A full GC disables reference discovery by the CM
  2212   //     ref processor and abandons any entries on its
  2213   //     discovered lists.
  2214   //
  2215   // * For the STW processor:
  2216   //   * Non MT discovery is enabled at the start of a full GC.
  2217   //   * Processing and enqueueing during a full GC is non-MT.
  2218   //   * During a full GC, references are processed after marking.
  2219   //
  2220   //   * Discovery (may or may not be MT) is enabled at the start
  2221   //     of an incremental evacuation pause.
  2222   //   * References are processed near the end of a STW evacuation pause.
  2223   //   * For both types of GC:
  2224   //     * Discovery is atomic - i.e. not concurrent.
  2225   //     * Reference discovery will not need a barrier.
  2227   SharedHeap::ref_processing_init();
  2228   MemRegion mr = reserved_region();
  2230   // Concurrent Mark ref processor
  2231   _ref_processor_cm =
  2232     new ReferenceProcessor(mr,    // span
  2233                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2234                                 // mt processing
  2235                            (int) ParallelGCThreads,
  2236                                 // degree of mt processing
  2237                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
  2238                                 // mt discovery
  2239                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
  2240                                 // degree of mt discovery
  2241                            false,
  2242                                 // Reference discovery is not atomic
  2243                            &_is_alive_closure_cm,
  2244                                 // is alive closure
  2245                                 // (for efficiency/performance)
  2246                            true);
  2247                                 // Setting next fields of discovered
  2248                                 // lists requires a barrier.
  2250   // STW ref processor
  2251   _ref_processor_stw =
  2252     new ReferenceProcessor(mr,    // span
  2253                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2254                                 // mt processing
  2255                            MAX2((int)ParallelGCThreads, 1),
  2256                                 // degree of mt processing
  2257                            (ParallelGCThreads > 1),
  2258                                 // mt discovery
  2259                            MAX2((int)ParallelGCThreads, 1),
  2260                                 // degree of mt discovery
  2261                            true,
  2262                                 // Reference discovery is atomic
  2263                            &_is_alive_closure_stw,
  2264                                 // is alive closure
  2265                                 // (for efficiency/performance)
  2266                            false);
  2267                                 // Setting next fields of discovered
  2268                                 // lists does not require a barrier.
  2269 }
  2271 size_t G1CollectedHeap::capacity() const {
  2272   return _g1_committed.byte_size();
  2273 }
  2275 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  2276   assert(!hr->continuesHumongous(), "pre-condition");
  2277   hr->reset_gc_time_stamp();
  2278   if (hr->startsHumongous()) {
  2279     uint first_index = hr->hrs_index() + 1;
  2280     uint last_index = hr->last_hc_index();
  2281     for (uint i = first_index; i < last_index; i += 1) {
  2282       HeapRegion* chr = region_at(i);
  2283       assert(chr->continuesHumongous(), "sanity");
  2284       chr->reset_gc_time_stamp();
  2285     }
  2286   }
  2287 }
  2289 #ifndef PRODUCT
  2290 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
  2291 private:
  2292   unsigned _gc_time_stamp;
  2293   bool _failures;
  2295 public:
  2296   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
  2297     _gc_time_stamp(gc_time_stamp), _failures(false) { }
  2299   virtual bool doHeapRegion(HeapRegion* hr) {
  2300     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
  2301     if (_gc_time_stamp != region_gc_time_stamp) {
  2302       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
  2303                              "expected %d", HR_FORMAT_PARAMS(hr),
  2304                              region_gc_time_stamp, _gc_time_stamp);
  2305       _failures = true;
  2306     }
  2307     return false;
  2308   }
  2310   bool failures() { return _failures; }
  2311 };
  2313 void G1CollectedHeap::check_gc_time_stamps() {
  2314   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  2315   heap_region_iterate(&cl);
  2316   guarantee(!cl.failures(), "all GC time stamps should have been reset");
  2317 }
  2318 #endif // PRODUCT
  2320 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2321                                                  DirtyCardQueue* into_cset_dcq,
  2322                                                  bool concurrent,
  2323                                                  int worker_i) {
  2324   // Clean cards in the hot card cache
  2325   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  2326   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
  2328   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2329   int n_completed_buffers = 0;
  2330   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2331     n_completed_buffers++;
  2332   }
  2333   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  2334   dcqs.clear_n_completed_buffers();
  2335   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2336 }
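// Illustrative sketch, not part of this file: a minimal closure for the
// buffer-draining loop above, assuming the CardTableEntryClosure interface
// of this codebase (do_card_ptr returning true to keep processing), could
// simply count the cards it is handed:
//
//   class CountCardsClosure : public CardTableEntryClosure {
//     size_t _cards;
//   public:
//     CountCardsClosure() : _cards(0) { }
//     bool do_card_ptr(jbyte* card_ptr, int worker_i) {
//       _cards++;
//       return true;            // true == continue with the current buffer
//     }
//     size_t cards() const { return _cards; }
//   };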
  2339 // Computes the sum of the storage used by the various regions.
  2341 size_t G1CollectedHeap::used() const {
  2342   assert(Heap_lock->owner() != NULL,
  2343          "Should be owned on this thread's behalf.");
  2344   size_t result = _summary_bytes_used;
  2345   // Read only once in case it is set to NULL concurrently
  2346   HeapRegion* hr = _mutator_alloc_region.get();
  2347   if (hr != NULL)
  2348     result += hr->used();
  2349   return result;
  2350 }
  2352 size_t G1CollectedHeap::used_unlocked() const {
  2353   size_t result = _summary_bytes_used;
  2354   return result;
  2355 }
  2357 class SumUsedClosure: public HeapRegionClosure {
  2358   size_t _used;
  2359 public:
  2360   SumUsedClosure() : _used(0) {}
  2361   bool doHeapRegion(HeapRegion* r) {
  2362     if (!r->continuesHumongous()) {
  2363       _used += r->used();
  2364     }
  2365     return false;
  2366   }
  2367   size_t result() { return _used; }
  2368 };
  2370 size_t G1CollectedHeap::recalculate_used() const {
  2371   SumUsedClosure blk;
  2372   heap_region_iterate(&blk);
  2373   return blk.result();
  2374 }
  2376 size_t G1CollectedHeap::unsafe_max_alloc() {
  2377   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2378   // otherwise, is there space in the current allocation region?
  2380   // We need to store the current allocation region in a local variable
  2381   // here. The problem is that this method doesn't take any locks and
  2382   // there may be other threads which overwrite the current allocation
  2383   // region field. attempt_allocation(), for example, sets it to NULL
  2384   // and this can happen *after* the NULL check here but before the call
  2385   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2386   // to be a problem in the optimized build, since the two loads of the
  2387   // current allocation region field are optimized away.
  2388   HeapRegion* hr = _mutator_alloc_region.get();
  2389   if (hr == NULL) {
  2390     return 0;
  2391   }
  2392   return hr->free();
  2393 }
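// Illustrative sketch, not part of this file: the comment above describes a
// classic time-of-check/time-of-use race. The broken variant would read the
// field twice (hypothetical):
//
//   if (_mutator_alloc_region.get() != NULL) {
//     return _mutator_alloc_region.get()->free();  // get() may now be NULL
//   }
//
// Loading the field once into a local, as unsafe_max_alloc() does, means
// the use cannot observe a different value than the check.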
  2395 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  2396   switch (cause) {
  2397     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
  2398     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  2399     case GCCause::_g1_humongous_allocation: return true;
  2400     default:                                return false;
  2401   }
  2402 }
  2404 #ifndef PRODUCT
  2405 void G1CollectedHeap::allocate_dummy_regions() {
  2406   // Let's fill up most of the region
  2407   size_t word_size = HeapRegion::GrainWords - 1024;
  2408   // And as a result the region we'll allocate will be humongous.
  2409   guarantee(isHumongous(word_size), "sanity");
  2411   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
  2412     // Let's use the existing mechanism for the allocation
  2413     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
  2414     if (dummy_obj != NULL) {
  2415       MemRegion mr(dummy_obj, word_size);
  2416       CollectedHeap::fill_with_object(mr);
  2417     } else {
  2418       // If we can't allocate once, we probably cannot allocate
  2419       // again. Let's get out of the loop.
  2420       break;
  2421     }
  2422   }
  2423 }
  2424 #endif // !PRODUCT
  2426 void G1CollectedHeap::increment_old_marking_cycles_started() {
  2427   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
  2428     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
  2429     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
  2430     _old_marking_cycles_started, _old_marking_cycles_completed));
  2432   _old_marking_cycles_started++;
  2433 }
  2435 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  2436   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2438   // We assume that if concurrent == true, then the caller is a
  2439   // concurrent thread that has joined the Suspendible Thread
  2440   // Set. If there's ever a cheap way to check this, we should add an
  2441   // assert here.
  2443   // Given that this method is called at the end of a Full GC or of a
  2444   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2445   // interrupt a concurrent cycle), the number of full collections
  2446   // completed should be either one (in the case where there was no
  2447   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2448   // behind the number of full collections started.
  2450   // This is the case for the inner caller, i.e. a Full GC.
  2451   assert(concurrent ||
  2452          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
  2453          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
  2454          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
  2455                  "is inconsistent with _old_marking_cycles_completed = %u",
  2456                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2458   // This is the case for the outer caller, i.e. the concurrent cycle.
  2459   assert(!concurrent ||
  2460          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
  2461          err_msg("for outer caller (concurrent cycle): "
  2462                  "_old_marking_cycles_started = %u "
  2463                  "is inconsistent with _old_marking_cycles_completed = %u",
  2464                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2466   _old_marking_cycles_completed += 1;
  2468   // We need to clear the "in_progress" flag in the CM thread before
  2469   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  2470   // is set) so that if a waiter requests another System.gc() it doesn't
  2471   // incorrectly see that a marking cycle is still in progress.
  2472   if (concurrent) {
  2473     _cmThread->clear_in_progress();
  2474   }
  2476   // This notify_all() will ensure that a thread that called
  2477   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
  2478   // and is waiting for a full GC to finish will be woken up. It is
  2479   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2480   FullGCCount_lock->notify_all();
  2481 }
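// Worked example (illustrative) of the nesting invariant asserted above,
// starting from started == completed == N:
//
//   concurrent cycle starts:    started = N+1, completed = N      (diff 1)
//   Full GC interrupts it:      started = N+2, completed = N      (diff 2)
//   Full GC completes:          started = N+2, completed = N+1    (diff 1)
//   concurrent cycle finishes:  started = N+2, completed = N+2    (diff 0)
//
// which is why the inner (Full GC) caller may observe a difference of one
// or two, while the outer (concurrent) caller must observe exactly one.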
  2483 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
  2484   _concurrent_cycle_started = true;
  2485   _gc_timer_cm->register_gc_start(start_time);
  2487   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
  2488   trace_heap_before_gc(_gc_tracer_cm);
  2489 }
  2491 void G1CollectedHeap::register_concurrent_cycle_end() {
  2492   if (_concurrent_cycle_started) {
  2493     if (_cm->has_aborted()) {
  2494       _gc_tracer_cm->report_concurrent_mode_failure();
  2495     }
  2497     _gc_timer_cm->register_gc_end();
  2498     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
  2500     _concurrent_cycle_started = false;
  2501   }
  2502 }
  2504 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
  2505   if (_concurrent_cycle_started) {
  2506     trace_heap_after_gc(_gc_tracer_cm);
  2507   }
  2508 }
  2510 G1YCType G1CollectedHeap::yc_type() {
  2511   bool is_young = g1_policy()->gcs_are_young();
  2512   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
  2513   bool is_during_mark = mark_in_progress();
  2515   if (is_initial_mark) {
  2516     return InitialMark;
  2517   } else if (is_during_mark) {
  2518     return DuringMark;
  2519   } else if (is_young) {
  2520     return Normal;
  2521   } else {
  2522     return Mixed;
  2523   }
  2524 }
  2526 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2527   assert_heap_not_locked();
  2529   unsigned int gc_count_before;
  2530   unsigned int old_marking_count_before;
  2531   bool retry_gc;
  2533   do {
  2534     retry_gc = false;
  2536     {
  2537       MutexLocker ml(Heap_lock);
  2539       // Read the GC count while holding the Heap_lock
  2540       gc_count_before = total_collections();
  2541       old_marking_count_before = _old_marking_cycles_started;
  2542     }
  2544     if (should_do_concurrent_full_gc(cause)) {
  2545       // Schedule an initial-mark evacuation pause that will start a
  2546       // concurrent cycle. We're setting word_size to 0 which means that
  2547       // we are not requesting a post-GC allocation.
  2548       VM_G1IncCollectionPause op(gc_count_before,
  2549                                  0,     /* word_size */
  2550                                  true,  /* should_initiate_conc_mark */
  2551                                  g1_policy()->max_pause_time_ms(),
  2552                                  cause);
  2554       VMThread::execute(&op);
  2555       if (!op.pause_succeeded()) {
  2556         if (old_marking_count_before == _old_marking_cycles_started) {
  2557           retry_gc = op.should_retry_gc();
  2558         } else {
  2559           // A Full GC happened while we were trying to schedule the
  2560           // initial-mark GC. No point in starting a new cycle given
  2561           // that the whole heap was collected anyway.
  2562         }
  2564         if (retry_gc) {
  2565           if (GC_locker::is_active_and_needs_gc()) {
  2566             GC_locker::stall_until_clear();
  2567           }
  2568         }
  2569       }
  2570     } else {
  2571       if (cause == GCCause::_gc_locker
  2572           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2574         // Schedule a standard evacuation pause. We're setting word_size
  2575         // to 0 which means that we are not requesting a post-GC allocation.
  2576         VM_G1IncCollectionPause op(gc_count_before,
  2577                                    0,     /* word_size */
  2578                                    false, /* should_initiate_conc_mark */
  2579                                    g1_policy()->max_pause_time_ms(),
  2580                                    cause);
  2581         VMThread::execute(&op);
  2582       } else {
  2583         // Schedule a Full GC.
  2584         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
  2585         VMThread::execute(&op);
  2586       }
  2587     }
  2588   } while (retry_gc);
  2589 }
  2591 bool G1CollectedHeap::is_in(const void* p) const {
  2592   if (_g1_committed.contains(p)) {
  2593     // Given that we know that p is in the committed space,
  2594     // heap_region_containing_raw() should successfully
  2595     // return the containing region.
  2596     HeapRegion* hr = heap_region_containing_raw(p);
  2597     return hr->is_in(p);
  2598   } else {
  2599     return false;
  2600   }
  2601 }
  2603 // Iteration functions.
  2605 // Iterates an OopClosure over all ref-containing fields of objects
  2606 // within a HeapRegion.
  2608 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2609   MemRegion _mr;
  2610   ExtendedOopClosure* _cl;
  2611 public:
  2612   IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
  2613     : _mr(mr), _cl(cl) {}
  2614   bool doHeapRegion(HeapRegion* r) {
  2615     if (!r->continuesHumongous()) {
  2616       r->oop_iterate(_cl);
  2617     }
  2618     return false;
  2619   }
  2620 };
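// A minimal sketch (not from this file) of a client closure for the
// oop_iterate() entry points below; the closure name and counting logic
// are illustrative only:
//
//   class CountReferencesClosure : public ExtendedOopClosure {
//     size_t _count;
//   public:
//     CountReferencesClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const { return _count; }
//   };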
  2622 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  2623   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2624   heap_region_iterate(&blk);
  2625 }
  2627 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  2628   IterateOopClosureRegionClosure blk(mr, cl);
  2629   heap_region_iterate(&blk);
  2630 }
  2632 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2634 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2635   ObjectClosure* _cl;
  2636 public:
  2637   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2638   bool doHeapRegion(HeapRegion* r) {
  2639     if (! r->continuesHumongous()) {
  2640       r->object_iterate(_cl);
  2641     }
  2642     return false;
  2643   }
  2644 };
  2646 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  2647   IterateObjectClosureRegionClosure blk(cl);
  2648   heap_region_iterate(&blk);
  2649 }
  2651 // Calls a SpaceClosure on a HeapRegion.
  2653 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2654   SpaceClosure* _cl;
  2655 public:
  2656   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2657   bool doHeapRegion(HeapRegion* r) {
  2658     _cl->do_space(r);
  2659     return false;
  2660   }
  2661 };
  2663 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2664   SpaceClosureRegionClosure blk(cl);
  2665   heap_region_iterate(&blk);
  2666 }
  2668 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  2669   _hrs.iterate(cl);
  2670 }
  2672 void
  2673 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2674                                                  uint worker_id,
  2675                                                  uint no_of_par_workers,
  2676                                                  jint claim_value) {
  2677   const uint regions = n_regions();
  2678   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  2679                              no_of_par_workers :
  2680                              1);
  2681   assert(UseDynamicNumberOfGCThreads ||
  2682          no_of_par_workers == workers()->total_workers(),
  2683          "Non dynamic should use fixed number of workers");
  2684   // try to spread out the starting points of the workers
  2685   const HeapRegion* start_hr =
  2686                         start_region_for_worker(worker_id, no_of_par_workers);
  2687   const uint start_index = start_hr->hrs_index();
  2689   // each worker will actually look at all regions
  2690   for (uint count = 0; count < regions; ++count) {
  2691     const uint index = (start_index + count) % regions;
  2692     assert(0 <= index && index < regions, "sanity");
  2693     HeapRegion* r = region_at(index);
  2694     // we'll ignore "continues humongous" regions (we'll process them
  2695     // when we come across their corresponding "start humongous"
  2696     // region) and regions already claimed
  2697     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2698       continue;
  2699     }
  2700     // OK, try to claim it
  2701     if (r->claimHeapRegion(claim_value)) {
  2702       // success!
  2703       assert(!r->continuesHumongous(), "sanity");
  2704       if (r->startsHumongous()) {
  2705         // If the region is "starts humongous" we'll iterate over its
  2706         // "continues humongous" regions first; in fact we'll do them
  2707         // first. The order is important. In one case, calling the
  2708         // closure on the "starts humongous" region might de-allocate
  2709         // and clear all its "continues humongous" regions and, as a
  2710         // result, we might end up processing them twice. So, we'll do
  2711         // them first (notice: most closures will ignore them anyway) and
  2712         // then we'll do the "starts humongous" region.
  2713         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  2714           HeapRegion* chr = region_at(ch_index);
  2716           // if the region has already been claimed or it's not
  2717           // "continues humongous" we're done
  2718           if (chr->claim_value() == claim_value ||
  2719               !chr->continuesHumongous()) {
  2720             break;
  2721           }
  2723           // No one should have claimed it directly. We can assert this
  2724           // given that we claimed its "starts humongous" region.
  2725           assert(chr->claim_value() != claim_value, "sanity");
  2726           assert(chr->humongous_start_region() == r, "sanity");
  2728           if (chr->claimHeapRegion(claim_value)) {
  2729             // we should always be able to claim it; no one else should
  2730             // be trying to claim this region
  2732             bool res2 = cl->doHeapRegion(chr);
  2733             assert(!res2, "Should not abort");
  2735             // Right now, this holds (i.e., no closure that actually
  2736             // does something with "continues humongous" regions
  2737             // clears them). We might have to weaken it in the future,
  2738             // but let's leave these two asserts here for extra safety.
  2739             assert(chr->continuesHumongous(), "should still be the case");
  2740             assert(chr->humongous_start_region() == r, "sanity");
  2741           } else {
  2742             guarantee(false, "we should not reach here");
  2743           }
  2744         }
  2745       }
  2747       assert(!r->continuesHumongous(), "sanity");
  2748       bool res = cl->doHeapRegion(r);
  2749       assert(!res, "Should not abort");
  2750     }
  2751   }
  2752 }
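// The claiming above relies on HeapRegion::claimHeapRegion() behaving as
// an atomic test-and-set. A sketch of the idea (the actual implementation
// lives in heapRegion.cpp and may differ in detail):
//
//   bool HeapRegion::claimHeapRegion(jint claim_value) {
//     jint current = _claimed;
//     if (current != claim_value) {
//       jint res = Atomic::cmpxchg(claim_value, &_claimed, current);
//       if (res == current) {
//         return true;   // this thread won the race for the region
//       }
//     }
//     return false;      // someone else claimed it first
//   }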
  2754 class ResetClaimValuesClosure: public HeapRegionClosure {
  2755 public:
  2756   bool doHeapRegion(HeapRegion* r) {
  2757     r->set_claim_value(HeapRegion::InitialClaimValue);
  2758     return false;
  2759   }
  2760 };
  2762 void G1CollectedHeap::reset_heap_region_claim_values() {
  2763   ResetClaimValuesClosure blk;
  2764   heap_region_iterate(&blk);
  2765 }
  2767 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  2768   ResetClaimValuesClosure blk;
  2769   collection_set_iterate(&blk);
  2770 }
  2772 #ifdef ASSERT
  2773 // This checks whether all regions in the heap have the correct claim
  2774 // value. I also piggy-backed on this a check to ensure that the
  2775 // humongous_start_region() information on "continues humongous"
  2776 // regions is correct.
  2778 class CheckClaimValuesClosure : public HeapRegionClosure {
  2779 private:
  2780   jint _claim_value;
  2781   uint _failures;
  2782   HeapRegion* _sh_region;
  2784 public:
  2785   CheckClaimValuesClosure(jint claim_value) :
  2786     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2787   bool doHeapRegion(HeapRegion* r) {
  2788     if (r->claim_value() != _claim_value) {
  2789       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2790                              "claim value = %d, should be %d",
  2791                              HR_FORMAT_PARAMS(r),
  2792                              r->claim_value(), _claim_value);
  2793       ++_failures;
  2794     }
  2795     if (!r->isHumongous()) {
  2796       _sh_region = NULL;
  2797     } else if (r->startsHumongous()) {
  2798       _sh_region = r;
  2799     } else if (r->continuesHumongous()) {
  2800       if (r->humongous_start_region() != _sh_region) {
  2801         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2802                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2803                                HR_FORMAT_PARAMS(r),
  2804                                r->humongous_start_region(),
  2805                                _sh_region);
  2806         ++_failures;
  2807       }
  2808     }
  2809     return false;
  2810   }
  2811   uint failures() { return _failures; }
  2812 };
  2814 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2815   CheckClaimValuesClosure cl(claim_value);
  2816   heap_region_iterate(&cl);
  2817   return cl.failures() == 0;
  2818 }
  2820 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
  2821 private:
  2822   jint _claim_value;
  2823   uint _failures;
  2825 public:
  2826   CheckClaimValuesInCSetHRClosure(jint claim_value) :
  2827     _claim_value(claim_value), _failures(0) { }
  2829   uint failures() { return _failures; }
  2831   bool doHeapRegion(HeapRegion* hr) {
  2832     assert(hr->in_collection_set(), "how?");
  2833     assert(!hr->isHumongous(), "H-region in CSet");
  2834     if (hr->claim_value() != _claim_value) {
  2835       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
  2836                              "claim value = %d, should be %d",
  2837                              HR_FORMAT_PARAMS(hr),
  2838                              hr->claim_value(), _claim_value);
  2839       _failures += 1;
  2840     }
  2841     return false;
  2842   }
  2843 };
  2845 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
  2846   CheckClaimValuesInCSetHRClosure cl(claim_value);
  2847   collection_set_iterate(&cl);
  2848   return cl.failures() == 0;
  2849 }
  2850 #endif // ASSERT
  2852 // Clear the cached CSet starting regions and (more importantly)
  2853 // the time stamps. Called when we reset the GC time stamp.
  2854 void G1CollectedHeap::clear_cset_start_regions() {
  2855   assert(_worker_cset_start_region != NULL, "sanity");
  2856   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
  2858   int n_queues = MAX2((int)ParallelGCThreads, 1);
  2859   for (int i = 0; i < n_queues; i++) {
  2860     _worker_cset_start_region[i] = NULL;
  2861     _worker_cset_start_region_time_stamp[i] = 0;
  2862   }
  2863 }
  2865 // Given the id of a worker, obtain or calculate a suitable
  2866 // starting region for iterating over the current collection set.
  2867 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
  2868   assert(get_gc_time_stamp() > 0, "should have been updated by now");
  2870   HeapRegion* result = NULL;
  2871   unsigned gc_time_stamp = get_gc_time_stamp();
  2873   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
  2874     // Cached starting region for current worker was set
  2875     // during the current pause - so it's valid.
  2876     // Note: the cached starting heap region may be NULL
  2877     // (when the collection set is empty).
  2878     result = _worker_cset_start_region[worker_i];
  2879     assert(result == NULL || result->in_collection_set(), "sanity");
  2880     return result;
  2881   }
  2883   // The cached entry was not valid so let's calculate
  2884   // a suitable starting heap region for this worker.
  2886   // We want the parallel threads to start their collection
  2887   // set iteration at different collection set regions to
  2888   // avoid contention.
  2889   // If we have:
  2890   //          n collection set regions
  2891   //          p threads
  2892   // Then thread t will start at region floor ((t * n) / p)
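// Worked example (illustrative numbers): with n = 8 CSet regions and
// p = 3 threads, the starting regions are
//   thread 0: floor((0 * 8) / 3) = 0
//   thread 1: floor((1 * 8) / 3) = 2
//   thread 2: floor((2 * 8) / 3) = 5
// so iteration begins roughly evenly spaced through the collection set.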
  2894   result = g1_policy()->collection_set();
  2895   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2896     uint cs_size = g1_policy()->cset_region_length();
  2897     uint active_workers = workers()->active_workers();
  2898     assert(UseDynamicNumberOfGCThreads ||
  2899              active_workers == workers()->total_workers(),
  2900              "Unless dynamic should use total workers");
  2902     uint end_ind   = (cs_size * worker_i) / active_workers;
  2903     uint start_ind = 0;
  2905     if (worker_i > 0 &&
  2906         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
  2907       // The previous worker's starting region is valid
  2908       // so let's iterate from there
  2909       start_ind = (cs_size * (worker_i - 1)) / active_workers;
  2910       result = _worker_cset_start_region[worker_i - 1];
  2911     }
  2913     for (uint i = start_ind; i < end_ind; i++) {
  2914       result = result->next_in_collection_set();
  2915     }
  2916   }
  2918   // Note: the calculated starting heap region may be NULL
  2919   // (when the collection set is empty).
  2920   assert(result == NULL || result->in_collection_set(), "sanity");
  2921   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
  2922          "should be updated only once per pause");
  2923   _worker_cset_start_region[worker_i] = result;
  2924   OrderAccess::storestore();
  2925   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
  2926   return result;
  2927 }
  2929 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
  2930                                                      uint no_of_par_workers) {
  2931   uint worker_num =
  2932            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
  2933   assert(UseDynamicNumberOfGCThreads ||
  2934          no_of_par_workers == workers()->total_workers(),
  2935          "Non dynamic should use fixed number of workers");
  2936   const uint start_index = n_regions() * worker_i / worker_num;
  2937   return region_at(start_index);
  2938 }
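// Worked example (illustrative): with n_regions() == 100 and 4 parallel
// workers, workers 0..3 start their heap iteration at regions 0, 25, 50
// and 75 respectively.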
  2940 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2941   HeapRegion* r = g1_policy()->collection_set();
  2942   while (r != NULL) {
  2943     HeapRegion* next = r->next_in_collection_set();
  2944     if (cl->doHeapRegion(r)) {
  2945       cl->incomplete();
  2946       return;
  2947     }
  2948     r = next;
  2949   }
  2950 }
  2952 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2953                                                   HeapRegionClosure *cl) {
  2954   if (r == NULL) {
  2955     // The CSet is empty so there's nothing to do.
  2956     return;
  2957   }
  2959   assert(r->in_collection_set(),
  2960          "Start region must be a member of the collection set.");
  2961   HeapRegion* cur = r;
  2962   while (cur != NULL) {
  2963     HeapRegion* next = cur->next_in_collection_set();
  2964     if (cl->doHeapRegion(cur) && false) {
  2965       cl->incomplete();
  2966       return;
  2967     }
  2968     cur = next;
  2969   }
  2970   cur = g1_policy()->collection_set();
  2971   while (cur != r) {
  2972     HeapRegion* next = cur->next_in_collection_set();
  2973     if (cl->doHeapRegion(cur) && false) {
  2974       cl->incomplete();
  2975       return;
  2976     }
  2977     cur = next;
  2978   }
  2979 }
  2981 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2982   return n_regions() > 0 ? region_at(0) : NULL;
  2983 }
  2986 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2987   Space* res = heap_region_containing(addr);
  2988   return res;
  2989 }
  2991 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2992   Space* sp = space_containing(addr);
  2993   if (sp != NULL) {
  2994     return sp->block_start(addr);
  2995   }
  2996   return NULL;
  2997 }
  2999 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  3000   Space* sp = space_containing(addr);
  3001   assert(sp != NULL, "block_size of address outside of heap");
  3002   return sp->block_size(addr);
  3003 }
  3005 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  3006   Space* sp = space_containing(addr);
  3007   return sp->block_is_obj(addr);
  3008 }
  3010 bool G1CollectedHeap::supports_tlab_allocation() const {
  3011   return true;
  3012 }
  3014 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  3015   return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
  3016 }
  3018 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
  3019   return young_list()->eden_used_bytes();
  3020 }
  3022 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
  3023 // must be smaller than the humongous object limit.
  3024 size_t G1CollectedHeap::max_tlab_size() const {
  3025   return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
  3026 }
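// Worked example (illustrative): with 1M regions and 8-byte heap words,
// HeapRegion::GrainWords is 128K words and the humongous threshold is
// half of that, 64K words; the maximum TLAB size is therefore just under
// half a region.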
  3028 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  3029   // Return the remaining space in the cur alloc region, but not less than
  3030   // the min TLAB size.
  3032   // Also, this value can be at most the humongous object threshold,
  3033   // since we can't allow tlabs to grow big enough to accommodate
  3034   // humongous objects.
  3036   HeapRegion* hr = _mutator_alloc_region.get();
  3037   size_t max_tlab = max_tlab_size() * wordSize;
  3038   if (hr == NULL) {
  3039     return max_tlab;
  3040   } else {
  3041     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  3042   }
  3043 }
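// Worked example (illustrative, default MinTLABSize of 2K): with 4K
// bytes free in the current alloc region this returns 4K; with only 1K
// free it still returns 2K, accepting that the allocation will retire
// the region and continue in a fresh one.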
  3045 size_t G1CollectedHeap::max_capacity() const {
  3046   return _g1_reserved.byte_size();
  3047 }
  3049 jlong G1CollectedHeap::millis_since_last_gc() {
  3050   // assert(false, "NYI");
  3051   return 0;
  3052 }
  3054 void G1CollectedHeap::prepare_for_verify() {
  3055   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  3056     ensure_parsability(false);
  3057   }
  3058   g1_rem_set()->prepare_for_verify();
  3059 }
  3061 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
  3062                                               VerifyOption vo) {
  3063   switch (vo) {
  3064   case VerifyOption_G1UsePrevMarking:
  3065     return hr->obj_allocated_since_prev_marking(obj);
  3066   case VerifyOption_G1UseNextMarking:
  3067     return hr->obj_allocated_since_next_marking(obj);
  3068   case VerifyOption_G1UseMarkWord:
  3069     return false;
  3070   default:
  3071     ShouldNotReachHere();
  3072   }
  3073   return false; // keep some compilers happy
  3074 }
  3076 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
  3077   switch (vo) {
  3078   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
  3079   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
  3080   case VerifyOption_G1UseMarkWord:    return NULL;
  3081   default:                            ShouldNotReachHere();
  3082   }
  3083   return NULL; // keep some compilers happy
  3084 }
  3086 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
  3087   switch (vo) {
  3088   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
  3089   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
  3090   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
  3091   default:                            ShouldNotReachHere();
  3092   }
  3093   return false; // keep some compilers happy
  3094 }
  3096 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
  3097   switch (vo) {
  3098   case VerifyOption_G1UsePrevMarking: return "PTAMS";
  3099   case VerifyOption_G1UseNextMarking: return "NTAMS";
  3100   case VerifyOption_G1UseMarkWord:    return "NONE";
  3101   default:                            ShouldNotReachHere();
  3102   }
  3103   return NULL; // keep some compilers happy
  3104 }
  3106 class VerifyRootsClosure: public OopClosure {
  3107 private:
  3108   G1CollectedHeap* _g1h;
  3109   VerifyOption     _vo;
  3110   bool             _failures;
  3111 public:
  3112   // _vo == UsePrevMarking -> use "prev" marking information,
  3113   // _vo == UseNextMarking -> use "next" marking information,
  3114   // _vo == UseMarkWord    -> use mark word from object header.
  3115   VerifyRootsClosure(VerifyOption vo) :
  3116     _g1h(G1CollectedHeap::heap()),
  3117     _vo(vo),
  3118     _failures(false) { }
  3120   bool failures() { return _failures; }
  3122   template <class T> void do_oop_nv(T* p) {
  3123     T heap_oop = oopDesc::load_heap_oop(p);
  3124     if (!oopDesc::is_null(heap_oop)) {
  3125       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  3126       if (_g1h->is_obj_dead_cond(obj, _vo)) {
  3127         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  3128                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  3129         if (_vo == VerifyOption_G1UseMarkWord) {
  3130           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
  3131         }
  3132         obj->print_on(gclog_or_tty);
  3133         _failures = true;
  3134       }
  3135     }
  3136   }
  3138   void do_oop(oop* p)       { do_oop_nv(p); }
  3139   void do_oop(narrowOop* p) { do_oop_nv(p); }
  3140 };
  3142 class G1VerifyCodeRootOopClosure: public OopClosure {
  3143   G1CollectedHeap* _g1h;
  3144   OopClosure* _root_cl;
  3145   nmethod* _nm;
  3146   VerifyOption _vo;
  3147   bool _failures;
  3149   template <class T> void do_oop_work(T* p) {
  3150     // First verify that this root is live
  3151     _root_cl->do_oop(p);
  3153     if (!G1VerifyHeapRegionCodeRoots) {
  3154       // We're not verifying the code roots attached to heap region.
  3155       return;
  3156     }
  3158     // Don't check the code roots during marking verification in a full GC
  3159     if (_vo == VerifyOption_G1UseMarkWord) {
  3160       return;
  3161     }
  3163     // Now verify that the current nmethod (which contains p) is
  3164     // in the code root list of the heap region containing the
  3165     // object referenced by p.
  3167     T heap_oop = oopDesc::load_heap_oop(p);
  3168     if (!oopDesc::is_null(heap_oop)) {
  3169       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  3171       // Now fetch the region containing the object
  3172       HeapRegion* hr = _g1h->heap_region_containing(obj);
  3173       HeapRegionRemSet* hrrs = hr->rem_set();
  3174       // Verify that the strong code root list for this region
  3175       // contains the nmethod
  3176       if (!hrrs->strong_code_roots_list_contains(_nm)) {
  3177         gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
  3178                               "from nmethod "PTR_FORMAT" not in strong "
  3179                               "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
  3180                               p, _nm, hr->bottom(), hr->end());
  3181         _failures = true;
  3182       }
  3183     }
  3184   }
  3186 public:
  3187   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
  3188     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
  3190   void do_oop(oop* p) { do_oop_work(p); }
  3191   void do_oop(narrowOop* p) { do_oop_work(p); }
  3193   void set_nmethod(nmethod* nm) { _nm = nm; }
  3194   bool failures() { return _failures; }
  3195 };
  3197 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  3198   G1VerifyCodeRootOopClosure* _oop_cl;
  3200 public:
  3201   G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
  3202     _oop_cl(oop_cl) {}
  3204   void do_code_blob(CodeBlob* cb) {
  3205     nmethod* nm = cb->as_nmethod_or_null();
  3206     if (nm != NULL) {
  3207       _oop_cl->set_nmethod(nm);
  3208       nm->oops_do(_oop_cl);
  3209     }
  3210   }
  3211 };
  3213 class YoungRefCounterClosure : public OopClosure {
  3214   G1CollectedHeap* _g1h;
  3215   int              _count;
  3216  public:
  3217   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  3218   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  3219   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  3221   int count() { return _count; }
  3222   void reset_count() { _count = 0; };
  3223 };
  3225 class VerifyKlassClosure: public KlassClosure {
  3226   YoungRefCounterClosure _young_ref_counter_closure;
  3227   OopClosure *_oop_closure;
  3228  public:
  3229   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  3230   void do_klass(Klass* k) {
  3231     k->oops_do(_oop_closure);
  3233     _young_ref_counter_closure.reset_count();
  3234     k->oops_do(&_young_ref_counter_closure);
  3235     if (_young_ref_counter_closure.count() > 0) {
  3236       guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
  3237     }
  3238   }
  3239 };
  3241 class VerifyLivenessOopClosure: public OopClosure {
  3242   G1CollectedHeap* _g1h;
  3243   VerifyOption _vo;
  3244 public:
  3245   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
  3246     _g1h(g1h), _vo(vo)
  3247   { }
  3248   void do_oop(narrowOop *p) { do_oop_work(p); }
  3249   void do_oop(      oop *p) { do_oop_work(p); }
  3251   template <class T> void do_oop_work(T *p) {
  3252     oop obj = oopDesc::load_decode_heap_oop(p);
  3253     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
  3254               "Dead object referenced by a not dead object");
  3255   }
  3256 };
  3258 class VerifyObjsInRegionClosure: public ObjectClosure {
  3259 private:
  3260   G1CollectedHeap* _g1h;
  3261   size_t _live_bytes;
  3262   HeapRegion *_hr;
  3263   VerifyOption _vo;
  3264 public:
  3265   // _vo == UsePrevMarking -> use "prev" marking information,
  3266   // _vo == UseNextMarking -> use "next" marking information,
  3267   // _vo == UseMarkWord    -> use mark word from object header.
  3268   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
  3269     : _live_bytes(0), _hr(hr), _vo(vo) {
  3270     _g1h = G1CollectedHeap::heap();
  3271   }
  3272   void do_object(oop o) {
  3273     VerifyLivenessOopClosure isLive(_g1h, _vo);
  3274     assert(o != NULL, "Huh?");
  3275     if (!_g1h->is_obj_dead_cond(o, _vo)) {
  3276       // If the object is alive according to the mark word,
  3277       // then verify that the marking information agrees.
  3278       // Note we can't verify the contra-positive of the
  3279       // above: if the object is dead (according to the mark
  3280       // word), it may not be marked, or may have been marked
  3281       // but has since become dead, or may have been allocated
  3282       // since the last marking.
  3283       if (_vo == VerifyOption_G1UseMarkWord) {
  3284         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
  3285       }
  3287       o->oop_iterate_no_header(&isLive);
  3288       if (!_hr->obj_allocated_since_prev_marking(o)) {
  3289         size_t obj_size = o->size();    // Make sure we don't overflow
  3290         _live_bytes += (obj_size * HeapWordSize);
  3291       }
  3292     }
  3293   }
  3294   size_t live_bytes() { return _live_bytes; }
  3295 };
  3297 class PrintObjsInRegionClosure : public ObjectClosure {
  3298   HeapRegion *_hr;
  3299   G1CollectedHeap *_g1;
  3300 public:
  3301   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  3302     _g1 = G1CollectedHeap::heap();
  3303   };
  3305   void do_object(oop o) {
  3306     if (o != NULL) {
  3307       HeapWord *start = (HeapWord *) o;
  3308       size_t word_sz = o->size();
  3309       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  3310                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  3311                           (void*) o, word_sz,
  3312                           _g1->isMarkedPrev(o),
  3313                           _g1->isMarkedNext(o),
  3314                           _hr->obj_allocated_since_prev_marking(o));
  3315       HeapWord *end = start + word_sz;
  3316       HeapWord *cur;
  3317       int *val;
  3318       for (cur = start; cur < end; cur++) {
  3319         val = (int *) cur;
  3320         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  3321       }
  3322     }
  3323   }
  3324 };
  3326 class VerifyRegionClosure: public HeapRegionClosure {
  3327 private:
  3328   bool             _par;
  3329   VerifyOption     _vo;
  3330   bool             _failures;
  3331 public:
  3332   // _vo == UsePrevMarking -> use "prev" marking information,
  3333   // _vo == UseNextMarking -> use "next" marking information,
  3334   // _vo == UseMarkWord    -> use mark word from object header.
  3335   VerifyRegionClosure(bool par, VerifyOption vo)
  3336     : _par(par),
  3337       _vo(vo),
  3338       _failures(false) {}
  3340   bool failures() {
  3341     return _failures;
  3342   }
  3344   bool doHeapRegion(HeapRegion* r) {
  3345     if (!r->continuesHumongous()) {
  3346       bool failures = false;
  3347       r->verify(_vo, &failures);
  3348       if (failures) {
  3349         _failures = true;
  3350       } else {
  3351         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
  3352         r->object_iterate(&not_dead_yet_cl);
  3353         if (_vo != VerifyOption_G1UseNextMarking) {
  3354           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  3355             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  3356                                    "max_live_bytes "SIZE_FORMAT" "
  3357                                    "< calculated "SIZE_FORMAT,
  3358                                    r->bottom(), r->end(),
  3359                                    r->max_live_bytes(),
  3360                                  not_dead_yet_cl.live_bytes());
  3361             _failures = true;
  3362           }
  3363         } else {
  3364           // When vo == UseNextMarking we cannot currently do a sanity
  3365           // check on the live bytes as the calculation has not been
  3366           // finalized yet.
  3367         }
  3368       }
  3369     }
  3370     return false; // stop the region iteration if we hit a failure
  3371   }
  3372 };
  3374 // This is the task used for parallel verification of the heap regions
  3376 class G1ParVerifyTask: public AbstractGangTask {
  3377 private:
  3378   G1CollectedHeap* _g1h;
  3379   VerifyOption     _vo;
  3380   bool             _failures;
  3382 public:
  3383   // _vo == UsePrevMarking -> use "prev" marking information,
  3384   // _vo == UseNextMarking -> use "next" marking information,
  3385   // _vo == UseMarkWord    -> use mark word from object header.
  3386   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
  3387     AbstractGangTask("Parallel verify task"),
  3388     _g1h(g1h),
  3389     _vo(vo),
  3390     _failures(false) { }
  3392   bool failures() {
  3393     return _failures;
  3394   }
  3396   void work(uint worker_id) {
  3397     HandleMark hm;
  3398     VerifyRegionClosure blk(true, _vo);
  3399     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
  3400                                           _g1h->workers()->active_workers(),
  3401                                           HeapRegion::ParVerifyClaimValue);
  3402     if (blk.failures()) {
  3403       _failures = true;
  3404     }
  3405   }
  3406 };
  3408 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
  3409   if (SafepointSynchronize::is_at_safepoint()) {
  3410     assert(Thread::current()->is_VM_thread(),
  3411            "Expected to be executed serially by the VM thread at this point");
  3413     if (!silent) { gclog_or_tty->print("Roots "); }
  3414     VerifyRootsClosure rootsCl(vo);
  3415     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
  3416     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
  3417     VerifyKlassClosure klassCl(this, &rootsCl);
  3419     // We apply the relevant closures to all the oops in the
  3420     // system dictionary, the string table and the code cache.
  3421     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
  3423     // Need cleared claim bits for the strong roots processing
  3424     ClassLoaderDataGraph::clear_claimed_marks();
  3426     process_strong_roots(true,      // activate StrongRootsScope
  3427                          false,     // we set "is scavenging" to false,
  3428                                     // so we don't reset the dirty cards.
  3429                          ScanningOption(so),  // roots scanning options
  3430                          &rootsCl,
  3431                          &blobsCl,
  3432                          &klassCl
  3433                          );
  3435     bool failures = rootsCl.failures() || codeRootsCl.failures();
  3437     if (vo != VerifyOption_G1UseMarkWord) {
  3438       // If we're verifying during a full GC then the region sets
  3439       // will have been torn down at the start of the GC. Therefore
  3440       // verifying the region sets will fail. So we only verify
  3441       // the region sets when not in a full GC.
  3442       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
  3443       verify_region_sets();
  3444     }
  3446     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  3447     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  3448       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3449              "sanity check");
  3451       G1ParVerifyTask task(this, vo);
  3452       assert(UseDynamicNumberOfGCThreads ||
  3453         workers()->active_workers() == workers()->total_workers(),
  3454         "If not dynamic should be using all the workers");
  3455       int n_workers = workers()->active_workers();
  3456       set_par_threads(n_workers);
  3457       workers()->run_task(&task);
  3458       set_par_threads(0);
  3459       if (task.failures()) {
  3460         failures = true;
  3461       }
  3463       // Checks that the expected amount of parallel work was done.
  3464       // The implication is that n_workers is > 0.
  3465       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  3466              "sanity check");
  3468       reset_heap_region_claim_values();
  3470       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3471              "sanity check");
  3472     } else {
  3473       VerifyRegionClosure blk(false, vo);
  3474       heap_region_iterate(&blk);
  3475       if (blk.failures()) {
  3476         failures = true;
  3477       }
  3478     }
  3479     if (!silent) gclog_or_tty->print("RemSet ");
  3480     rem_set()->verify();
  3482     if (failures) {
  3483       gclog_or_tty->print_cr("Heap:");
  3484       // It helps to have the per-region information in the output to
  3485       // help us track down what went wrong. This is why we call
  3486       // print_extended_on() instead of print_on().
  3487       print_extended_on(gclog_or_tty);
  3488       gclog_or_tty->print_cr("");
  3489 #ifndef PRODUCT
  3490       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  3491         concurrent_mark()->print_reachable("at-verification-failure",
  3492                                            vo, false /* all */);
  3493       }
  3494 #endif
  3495       gclog_or_tty->flush();
  3496     }
  3497     guarantee(!failures, "there should not have been any failures");
  3498   } else {
  3499     if (!silent)
  3500       gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
  3501   }
  3502 }
  3504 void G1CollectedHeap::verify(bool silent) {
  3505   verify(silent, VerifyOption_G1UsePrevMarking);
  3506 }
  3508 double G1CollectedHeap::verify(bool guard, const char* msg) {
  3509   double verify_time_ms = 0.0;
  3511   if (guard && total_collections() >= VerifyGCStartAt) {
  3512     double verify_start = os::elapsedTime();
  3513     HandleMark hm;  // Discard invalid handles created during verification
  3514     prepare_for_verify();
  3515     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
  3516     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  3517   }
  3519   return verify_time_ms;
  3520 }
  3522 void G1CollectedHeap::verify_before_gc() {
  3523   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
  3524   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
  3525 }
  3527 void G1CollectedHeap::verify_after_gc() {
  3528   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
  3529   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
  3530 }
  3532 class PrintRegionClosure: public HeapRegionClosure {
  3533   outputStream* _st;
  3534 public:
  3535   PrintRegionClosure(outputStream* st) : _st(st) {}
  3536   bool doHeapRegion(HeapRegion* r) {
  3537     r->print_on(_st);
  3538     return false;
  3539   }
  3540 };
  3542 void G1CollectedHeap::print_on(outputStream* st) const {
  3543   st->print(" %-20s", "garbage-first heap");
  3544   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  3545             capacity()/K, used_unlocked()/K);
  3546   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  3547             _g1_storage.low_boundary(),
  3548             _g1_storage.high(),
  3549             _g1_storage.high_boundary());
  3550   st->cr();
  3551   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  3552   uint young_regions = _young_list->length();
  3553   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
  3554             (size_t) young_regions * HeapRegion::GrainBytes / K);
  3555   uint survivor_regions = g1_policy()->recorded_survivor_regions();
  3556   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
  3557             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  3558   st->cr();
  3559   MetaspaceAux::print_on(st);
  3560 }
  3562 void G1CollectedHeap::print_extended_on(outputStream* st) const {
  3563   print_on(st);
  3565   // Print the per-region information.
  3566   st->cr();
  3567   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
  3568                "HS=humongous(starts), HC=humongous(continues), "
  3569                "CS=collection set, F=free, TS=gc time stamp, "
  3570                "PTAMS=previous top-at-mark-start, "
  3571                "NTAMS=next top-at-mark-start)");
  3572   PrintRegionClosure blk(st);
  3573   heap_region_iterate(&blk);
  3574 }
  3576 void G1CollectedHeap::print_on_error(outputStream* st) const {
  3577   this->CollectedHeap::print_on_error(st);
  3579   if (_cm != NULL) {
  3580     st->cr();
  3581     _cm->print_on_error(st);
  3582   }
  3583 }
  3585 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3586   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3587     workers()->print_worker_threads_on(st);
  3588   }
  3589   _cmThread->print_on(st);
  3590   st->cr();
  3591   _cm->print_worker_threads_on(st);
  3592   _cg1r->print_worker_threads_on(st);
  3593 }
  3595 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3596   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3597     workers()->threads_do(tc);
  3598   }
  3599   tc->do_thread(_cmThread);
  3600   _cg1r->threads_do(tc);
  3601 }
  3603 void G1CollectedHeap::print_tracing_info() const {
  3604   // We'll overload this to mean "trace GC pause statistics."
  3605   if (TraceGen0Time || TraceGen1Time) {
  3606     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  3607     // to that.
  3608     g1_policy()->print_tracing_info();
  3609   }
  3610   if (G1SummarizeRSetStats) {
  3611     g1_rem_set()->print_summary_info();
  3612   }
  3613   if (G1SummarizeConcMark) {
  3614     concurrent_mark()->print_summary_info();
  3615   }
  3616   g1_policy()->print_yg_surv_rate_info();
  3617   SpecializationStats::print();
  3618 }
  3620 #ifndef PRODUCT
  3621 // Helpful for debugging RSet issues.
  3623 class PrintRSetsClosure : public HeapRegionClosure {
  3624 private:
  3625   const char* _msg;
  3626   size_t _occupied_sum;
  3628 public:
  3629   bool doHeapRegion(HeapRegion* r) {
  3630     HeapRegionRemSet* hrrs = r->rem_set();
  3631     size_t occupied = hrrs->occupied();
  3632     _occupied_sum += occupied;
  3634     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
  3635                            HR_FORMAT_PARAMS(r));
  3636     if (occupied == 0) {
  3637       gclog_or_tty->print_cr("  RSet is empty");
  3638     } else {
  3639       hrrs->print();
  3640     }
  3641     gclog_or_tty->print_cr("----------");
  3642     return false;
  3643   }
  3645   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
  3646     gclog_or_tty->cr();
  3647     gclog_or_tty->print_cr("========================================");
  3648     gclog_or_tty->print_cr(msg);
  3649     gclog_or_tty->cr();
  3650   }
  3652   ~PrintRSetsClosure() {
  3653     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
  3654     gclog_or_tty->print_cr("========================================");
  3655     gclog_or_tty->cr();
  3656   }
  3657 };
  3659 void G1CollectedHeap::print_cset_rsets() {
  3660   PrintRSetsClosure cl("Printing CSet RSets");
  3661   collection_set_iterate(&cl);
  3662 }
  3664 void G1CollectedHeap::print_all_rsets() {
  3665   PrintRSetsClosure cl("Printing All RSets");
  3666   heap_region_iterate(&cl);
  3667 }
  3668 #endif // PRODUCT
  3670 G1CollectedHeap* G1CollectedHeap::heap() {
  3671   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  3672          "not a garbage-first heap");
  3673   return _g1h;
  3674 }
  3676 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  3677   // always_do_update_barrier = false;
  3678   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  3679   // Fill TLAB's and such
  3680   accumulate_statistics_all_tlabs();
  3681   ensure_parsability(true);
  3683   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
  3684       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  3685     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
  3686   }
  3687 }
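// For example (illustrative): with G1SummarizeRSetStatsPeriod == 10 the
// condition above holds once every ten collections, so the "Before GC"
// remembered set summary is printed before every tenth pause.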
  3689 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  3691   if (G1SummarizeRSetStats &&
  3692       (G1SummarizeRSetStatsPeriod > 0) &&
  3693       // we are at the end of the GC. Total collections has already been increased.
  3694       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
  3695     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
  3696   }
  3698   // FIXME: what is this about?
  3699   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  3700   // is set.
  3701   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  3702                         "derived pointer present"));
  3703   // always_do_update_barrier = true;
  3705   resize_all_tlabs();
  3707   // We have just completed a GC. Update the soft reference
  3708   // policy with the new heap occupancy
  3709   Universe::update_heap_info_at_gc();
  3710 }
  3712 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  3713                                                unsigned int gc_count_before,
  3714                                                bool* succeeded,
  3715                                                GCCause::Cause gc_cause) {
  3716   assert_heap_not_locked_and_not_at_safepoint();
  3717   g1_policy()->record_stop_world_start();
  3718   VM_G1IncCollectionPause op(gc_count_before,
  3719                              word_size,
  3720                              false, /* should_initiate_conc_mark */
  3721                              g1_policy()->max_pause_time_ms(),
  3722                              gc_cause);
  3723   VMThread::execute(&op);
  3725   HeapWord* result = op.result();
  3726   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  3727   assert(result == NULL || ret_succeeded,
  3728          "the result should be NULL if the VM did not succeed");
  3729   *succeeded = ret_succeeded;
  3731   assert_heap_not_locked();
  3732   return result;
  3733 }
  3735 void
  3736 G1CollectedHeap::doConcurrentMark() {
  3737   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  3738   if (!_cmThread->in_progress()) {
  3739     _cmThread->set_started();
  3740     CGC_lock->notify();
  3741   }
  3742 }
  3744 size_t G1CollectedHeap::pending_card_num() {
  3745   size_t extra_cards = 0;
  3746   JavaThread *curr = Threads::first();
  3747   while (curr != NULL) {
  3748     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3749     extra_cards += dcq.size();
  3750     curr = curr->next();
  3751   }
  3752   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3753   size_t buffer_size = dcqs.buffer_size();
  3754   size_t buffer_num = dcqs.completed_buffers_num();
  3756   // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
  3757   // in bytes - not the number of 'entries'. We need to convert
  3758   // into a number of cards.
  3759   return (buffer_size * buffer_num + extra_cards) / oopSize;
  3760 }
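// Worked example (illustrative): if buffer_size() reports 2048 bytes per
// buffer, 10 buffers have been completed, and the thread-local queues
// hold another 480 bytes, then on a 64-bit VM (oopSize == 8) this
// returns (2048 * 10 + 480) / 8 = 2620 pending cards.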
  3762 size_t G1CollectedHeap::cards_scanned() {
  3763   return g1_rem_set()->cardsScanned();
  3764 }
  3766 void
  3767 G1CollectedHeap::setup_surviving_young_words() {
  3768   assert(_surviving_young_words == NULL, "pre-condition");
  3769   uint array_length = g1_policy()->young_cset_region_length();
  3770   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  3771   if (_surviving_young_words == NULL) {
  3772     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
  3773                           "Not enough space for young surv words summary.");
  3774   }
  3775   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
  3776 #ifdef ASSERT
  3777   for (uint i = 0;  i < array_length; ++i) {
  3778     assert( _surviving_young_words[i] == 0, "memset above" );
  3779   }
  3780 #endif // ASSERT
  3781 }
  3783 void
  3784 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3785   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3786   uint array_length = g1_policy()->young_cset_region_length();
  3787   for (uint i = 0; i < array_length; ++i) {
  3788     _surviving_young_words[i] += surv_young_words[i];
  3789   }
  3790 }
  3792 void
  3793 G1CollectedHeap::cleanup_surviving_young_words() {
  3794   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3795   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
  3796   _surviving_young_words = NULL;
  3797 }
  3799 #ifdef ASSERT
  3800 class VerifyCSetClosure: public HeapRegionClosure {
  3801 public:
  3802   bool doHeapRegion(HeapRegion* hr) {
  3803     // Here we check that the CSet region's RSet is ready for parallel
  3804     // iteration. The fields that we'll verify are only manipulated
  3805     // when the region is part of a CSet and is collected. Afterwards,
  3806     // we reset these fields when we clear the region's RSet (when the
  3807     // region is freed) so they are ready when the region is
  3808     // re-allocated. The only exception to this is if there's an
  3809     // evacuation failure and instead of freeing the region we leave
  3810     // it in the heap. In that case, we reset these fields during
  3811     // evacuation failure handling.
  3812     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
  3814     // Here's a good place to add any other checks we'd like to
  3815     // perform on CSet regions.
  3816     return false;
  3817   }
  3818 };
  3819 #endif // ASSERT
  3821 #if TASKQUEUE_STATS
  3822 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3823   st->print_raw_cr("GC Task Stats");
  3824   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3825   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3826 }
  3828 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3829   print_taskqueue_stats_hdr(st);
  3831   TaskQueueStats totals;
  3832   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3833   for (int i = 0; i < n; ++i) {
  3834     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3835     totals += task_queue(i)->stats;
  3836   }
  3837   st->print_raw("tot "); totals.print(st); st->cr();
  3839   DEBUG_ONLY(totals.verify());
  3840 }
  3842 void G1CollectedHeap::reset_taskqueue_stats() {
  3843   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3844   for (int i = 0; i < n; ++i) {
  3845     task_queue(i)->stats.reset();
  3846   }
  3847 }
  3848 #endif // TASKQUEUE_STATS
  3850 void G1CollectedHeap::log_gc_header() {
  3851   if (!G1Log::fine()) {
  3852     return;
  3853   }
  3855   gclog_or_tty->date_stamp(PrintGCDateStamps);
  3856   gclog_or_tty->stamp(PrintGCTimeStamps);
  3858   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
  3859     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
  3860     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
  3862   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
  3863 }
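// For example (illustrative output), a young pause that also starts
// marking might begin its log line as
//   [GC pause (G1 Evacuation Pause) (young) (initial-mark)
// with the timings and heap transition appended by log_gc_footer().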
  3865 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
  3866   if (!G1Log::fine()) {
  3867     return;
  3868   }
  3870   if (G1Log::finer()) {
  3871     if (evacuation_failed()) {
  3872       gclog_or_tty->print(" (to-space exhausted)");
  3873     }
  3874     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  3875     g1_policy()->phase_times()->note_gc_end();
  3876     g1_policy()->phase_times()->print(pause_time_sec);
  3877     g1_policy()->print_detailed_heap_transition();
  3878   } else {
  3879     if (evacuation_failed()) {
  3880       gclog_or_tty->print("--");
  3881     }
  3882     g1_policy()->print_heap_transition();
  3883     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  3884   }
  3885   gclog_or_tty->flush();
  3886 }
  3888 bool
  3889 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3890   assert_at_safepoint(true /* should_be_vm_thread */);
  3891   guarantee(!is_gc_active(), "collection is not reentrant");
  3893   if (GC_locker::check_active_before_gc()) {
  3894     return false;
  3895   }
  3897   _gc_timer_stw->register_gc_start();
  3899   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
  3901   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3902   ResourceMark rm;
  3904   print_heap_before_gc();
  3905   trace_heap_before_gc(_gc_tracer_stw);
  3907   verify_region_sets_optional();
  3908   verify_dirty_young_regions();
  3910   // This call will decide whether this pause is an initial-mark
  3911   // pause. If it is, during_initial_mark_pause() will return true
  3912   // for the duration of this pause.
  3913   g1_policy()->decide_on_conc_mark_initiation();
  3915   // We do not allow initial-mark to be piggy-backed on a mixed GC.
  3916   assert(!g1_policy()->during_initial_mark_pause() ||
  3917           g1_policy()->gcs_are_young(), "sanity");
  3919   // We also do not allow mixed GCs during marking.
  3920   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
  3922   // Record whether this pause is an initial mark. When the current
  3923   // thread has completed its logging output and it's safe to signal
  3924   // the CM thread, the flag's value in the policy has been reset.
  3925   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
  3927   // Inner scope for scope based logging, timers, and stats collection
  3928   {
  3929     EvacuationInfo evacuation_info;
  3931     if (g1_policy()->during_initial_mark_pause()) {
  3932       // We are about to start a marking cycle, so we increment the
  3933       // full collection counter.
  3934       increment_old_marking_cycles_started();
  3935       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
  3936     }
  3938     _gc_tracer_stw->report_yc_type(yc_type());
  3940     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  3942     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  3943                                 workers()->active_workers() : 1);
  3944     double pause_start_sec = os::elapsedTime();
  3945     g1_policy()->phase_times()->note_gc_start(active_workers);
  3946     log_gc_header();
  3948     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  3949     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  3951     // If the secondary_free_list is not empty, append it to the
  3952     // free_list. No need to wait for the cleanup operation to finish;
  3953     // the region allocation code will check the secondary_free_list
  3954     // and wait if necessary. If the G1StressConcRegionFreeing flag is
  3955     // set, skip this step so that the region allocation code has to
  3956     // get entries from the secondary_free_list.
  3957     if (!G1StressConcRegionFreeing) {
  3958       append_secondary_free_list_if_not_empty_with_lock();
  3959     }
  3961     assert(check_young_list_well_formed(), "young list should be well formed");
  3962     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3963            "sanity check");
  3965     // Don't dynamically change the number of GC threads this early.  A value of
  3966     // 0 is used to indicate serial work.  When parallel work is done,
  3967     // it will be set.
  3969     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3970       IsGCActiveMark x;
  3972       gc_prologue(false);
  3973       increment_total_collections(false /* full gc */);
  3974       increment_gc_time_stamp();
  3976       verify_before_gc();
  3978       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3980       // Please see comment in g1CollectedHeap.hpp and
  3981       // G1CollectedHeap::ref_processing_init() to see how
  3982       // reference processing currently works in G1.
  3984       // Enable discovery in the STW reference processor
  3985       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
  3986                                             true /*verify_no_refs*/);
  3988       {
  3989         // We want to temporarily turn off discovery by the
  3990         // CM ref processor, if necessary, and turn it back on
  3991         // again later if we do. Using a scoped
  3992         // NoRefDiscovery object will do this.
  3993         NoRefDiscovery no_cm_discovery(ref_processor_cm());
  3995         // Forget the current alloc region (we might even choose it to be part
  3996         // of the collection set!).
  3997         release_mutator_alloc_region();
  3999         // We should call this after we retire the mutator alloc
  4000         // region(s) so that all the ALLOC / RETIRE events are generated
  4001         // before the start GC event.
  4002         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
  4004         // This timing is only used by the ergonomics to handle our pause target.
  4005         // It is unclear why this should not include the full pause. We will
  4006         // investigate this in CR 7178365.
  4007         //
  4008         // Preserving the old comment here if that helps the investigation:
  4009         //
  4010         // The elapsed time induced by the start time below deliberately elides
  4011         // the possible verification above.
  4012         double sample_start_time_sec = os::elapsedTime();
  4014 #if YOUNG_LIST_VERBOSE
  4015         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  4016         _young_list->print();
  4017         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  4018 #endif // YOUNG_LIST_VERBOSE
  4020         g1_policy()->record_collection_pause_start(sample_start_time_sec);
  4022         double scan_wait_start = os::elapsedTime();
  4023         // We have to wait until the CM threads finish scanning the
  4024         // root regions as it's the only way to ensure that all the
  4025         // objects on them have been correctly scanned before we start
  4026         // moving them during the GC.
  4027         bool waited = _cm->root_regions()->wait_until_scan_finished();
  4028         double wait_time_ms = 0.0;
  4029         if (waited) {
  4030           double scan_wait_end = os::elapsedTime();
  4031           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  4032         }
  4033         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
  4035 #if YOUNG_LIST_VERBOSE
  4036         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  4037         _young_list->print();
  4038 #endif // YOUNG_LIST_VERBOSE
  4040         if (g1_policy()->during_initial_mark_pause()) {
  4041           concurrent_mark()->checkpointRootsInitialPre();
  4042         }
  4044 #if YOUNG_LIST_VERBOSE
  4045         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  4046         _young_list->print();
  4047         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  4048 #endif // YOUNG_LIST_VERBOSE
  4050         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
  4052         _cm->note_start_of_gc();
  4053         // We should not verify the per-thread SATB buffers given that
  4054         // we have not filtered them yet (we'll do so during the
  4055         // GC). We also call this after finalize_cset() to
  4056         // ensure that the CSet has been finalized.
  4057         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  4058                                  true  /* verify_enqueued_buffers */,
  4059                                  false /* verify_thread_buffers */,
  4060                                  true  /* verify_fingers */);
  4062         if (_hr_printer.is_active()) {
  4063           HeapRegion* hr = g1_policy()->collection_set();
  4064           while (hr != NULL) {
  4065             G1HRPrinter::RegionType type;
  4066             if (!hr->is_young()) {
  4067               type = G1HRPrinter::Old;
  4068             } else if (hr->is_survivor()) {
  4069               type = G1HRPrinter::Survivor;
  4070             } else {
  4071               type = G1HRPrinter::Eden;
  4072             }
  4073             _hr_printer.cset(hr);
  4074             hr = hr->next_in_collection_set();
  4075           }
  4076         }
  4078 #ifdef ASSERT
  4079         VerifyCSetClosure cl;
  4080         collection_set_iterate(&cl);
  4081 #endif // ASSERT
  4083         setup_surviving_young_words();
  4085         // Initialize the GC alloc regions.
  4086         init_gc_alloc_regions(evacuation_info);
  4088         // Actually do the work...
  4089         evacuate_collection_set(evacuation_info);
  4091         // We do this to mainly verify the per-thread SATB buffers
  4092         // (which have been filtered by now) since we didn't verify
  4093         // them earlier. No point in re-checking the stacks / enqueued
  4094         // buffers given that the CSet has not changed since last time
  4095         // we checked.
  4096         _cm->verify_no_cset_oops(false /* verify_stacks */,
  4097                                  false /* verify_enqueued_buffers */,
  4098                                  true  /* verify_thread_buffers */,
  4099                                  true  /* verify_fingers */);
  4101         free_collection_set(g1_policy()->collection_set(), evacuation_info);
  4102         g1_policy()->clear_collection_set();
  4104         cleanup_surviving_young_words();
  4106         // Start a new incremental collection set for the next pause.
  4107         g1_policy()->start_incremental_cset_building();
  4109         // Clear the _cset_fast_test bitmap in anticipation of adding
  4110         // regions to the incremental collection set for the next
  4111         // evacuation pause.
  4112         clear_cset_fast_test();
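_cset_fast_test gives the hot evacuation path an O(1) membership test: an oop's region index is derived with a shift and looked up in a flat table, which is cleared here before the next incremental CSet is built. A standalone sketch of such a region-indexed table (all names and the region size are illustrative assumptions, not G1's):

// Editorial sketch, not part of this file.
#include <cstring>
#include <cstdint>

class CSetFastTest {
  static const size_t RegionShift = 20;   // assume 1M regions
  uintptr_t _heap_start;
  size_t    _num_regions;
  bool*     _in_cset;
public:
  CSetFastTest(uintptr_t heap_start, size_t num_regions)
    : _heap_start(heap_start), _num_regions(num_regions),
      _in_cset(new bool[num_regions]) { clear(); }
  ~CSetFastTest() { delete[] _in_cset; }

  void add_region(size_t index) { _in_cset[index] = true; }
  // Cheap test on the hot evacuation path: one shift, one load.
  bool contains(uintptr_t addr) const {
    size_t index = (addr - _heap_start) >> RegionShift;
    return index < _num_regions && _in_cset[index];
  }
  // Reset between pauses, before the next incremental CSet is built.
  void clear() { memset(_in_cset, 0, _num_regions * sizeof(bool)); }
};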
  4114         _young_list->reset_sampled_info();
  4116         // Don't check the whole heap at this point as the
  4117         // GC alloc regions from this pause have been tagged
  4118         // as survivors and moved on to the survivor list.
  4119         // Survivor regions will fail the !is_young() check.
  4120         assert(check_young_list_empty(false /* check_heap */),
  4121           "young list should be empty");
  4123 #if YOUNG_LIST_VERBOSE
  4124         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  4125         _young_list->print();
  4126 #endif // YOUNG_LIST_VERBOSE
  4128         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  4129                                              _young_list->first_survivor_region(),
  4130                                              _young_list->last_survivor_region());
  4132         _young_list->reset_auxilary_lists();
  4134         if (evacuation_failed()) {
  4135           _summary_bytes_used = recalculate_used();
  4136           uint n_queues = MAX2((int)ParallelGCThreads, 1);
  4137           for (uint i = 0; i < n_queues; i++) {
  4138             if (_evacuation_failed_info_array[i].has_failed()) {
  4139               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
  4140             }
  4141           }
  4142         } else {
  4143           // The "used" of the collection set regions has already been
  4144           // subtracted when they were freed. Add in the bytes evacuated.
  4145           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
  4146         }
  4148         if (g1_policy()->during_initial_mark_pause()) {
  4149           // We have to do this before we notify the CM threads that
  4150           // they can start working to make sure that all the
  4151           // appropriate initialization is done on the CM object.
  4152           concurrent_mark()->checkpointRootsInitialPost();
  4153           set_marking_started();
  4154           // Note that we don't actually trigger the CM thread at
  4155           // this point. We do that later when we're sure that
  4156           // the current thread has completed its logging output.
  4157         }
  4159         allocate_dummy_regions();
  4161 #if YOUNG_LIST_VERBOSE
  4162         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  4163         _young_list->print();
  4164         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  4165 #endif // YOUNG_LIST_VERBOSE
  4167         init_mutator_alloc_region();
  4169         {
  4170           size_t expand_bytes = g1_policy()->expansion_amount();
  4171           if (expand_bytes > 0) {
  4172             size_t bytes_before = capacity();
  4173             // No need for an ergo verbose message here,
  4174             // expansion_amount() does this when it returns a value > 0.
  4175             if (!expand(expand_bytes)) {
  4176               // We failed to expand the heap so let's verify that
  4177               // committed/uncommitted amount match the backing store
  4178               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  4179               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  4180             }
  4181           }
  4182         }
  4184         // We redo the verification but now wrt to the new CSet which
  4185         // has just got initialized after the previous CSet was freed.
  4186         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  4187                                  true  /* verify_enqueued_buffers */,
  4188                                  true  /* verify_thread_buffers */,
  4189                                  true  /* verify_fingers */);
  4190         _cm->note_end_of_gc();
  4192         // This timing is only used by the ergonomics to handle our pause target.
  4193         // It is unclear why this should not include the full pause. We will
  4194         // investigate this in CR 7178365.
  4195         double sample_end_time_sec = os::elapsedTime();
  4196         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
  4197         g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
  4199         MemoryService::track_memory_usage();
  4201         // In prepare_for_verify() below we'll need to scan the deferred
  4202         // update buffers to bring the RSets up-to-date if
  4203         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  4204         // the update buffers we'll probably need to scan cards on the
  4205         // regions we just allocated to (i.e., the GC alloc
  4206         // regions). However, during the last GC we called
  4207         // set_saved_mark() on all the GC alloc regions, so card
  4208         // scanning might skip the [saved_mark_word()...top()] area of
  4209         // those regions (i.e., the area we allocated objects into
  4210         // during the last GC). But it shouldn't. Given that
  4211         // saved_mark_word() is conditional on whether the GC time stamp
  4212         // on the region is current or not, by incrementing the GC time
  4213         // stamp here we invalidate all the GC time stamps on all the
  4214         // regions and saved_mark_word() will simply return top() for
  4215         // all the regions. This is a nicer way of ensuring this rather
  4216         // than iterating over the regions and fixing them. In fact, the
  4217         // GC time stamp increment here also ensures that
  4218         // saved_mark_word() will return top() between pauses, i.e.,
  4219         // during concurrent refinement. So we don't need the
  4220         // is_gc_active() check to decide which top to use when
  4221         // scanning cards (see CR 7039627).
  4222         increment_gc_time_stamp();
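A standalone sketch of the time-stamp trick described in the comment above: bumping one heap-wide stamp makes every region's saved mark stale at once, so saved_mark_word() degrades to top() without touching each region. Names and types here are illustrative only:

// Editorial sketch, not part of this file.
#include <cstddef>

struct Region {
  char*    _bottom;
  char*    _top;
  char*    _saved_mark;
  unsigned _gc_time_stamp;   // stamp at which _saved_mark was recorded

  // If the region's stamp is stale, the saved mark no longer applies
  // and the whole allocated part [bottom, top) must be scanned.
  char* saved_mark_word(unsigned heap_time_stamp) const {
    return (_gc_time_stamp == heap_time_stamp) ? _saved_mark : _top;
  }
  void set_saved_mark(unsigned heap_time_stamp) {
    _saved_mark = _top;
    _gc_time_stamp = heap_time_stamp;
  }
};

struct Heap {
  unsigned _gc_time_stamp = 0;
  // O(1) invalidation of all regions' saved marks, instead of an
  // O(#regions) walk that resets each one.
  void increment_gc_time_stamp() { ++_gc_time_stamp; }
};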
  4224         verify_after_gc();
  4226         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  4227         ref_processor_stw()->verify_no_references_recorded();
  4229         // CM reference discovery will be re-enabled if necessary.
  4230       }
  4232       // We should do this after we potentially expand the heap so
  4233       // that all the COMMIT events are generated before the end GC
  4234       // event, and after we retire the GC alloc regions so that all
  4235       // RETIRE events are generated before the end GC event.
  4236       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
  4238       if (mark_in_progress()) {
  4239         concurrent_mark()->update_g1_committed();
  4240       }
  4242 #ifdef TRACESPINNING
  4243       ParallelTaskTerminator::print_termination_counts();
  4244 #endif
  4246       gc_epilogue(false);
  4247     }
  4249     // Print the remainder of the GC log output.
  4250     log_gc_footer(os::elapsedTime() - pause_start_sec);
  4252     // It is not yet safe to tell the concurrent mark to
  4253     // start as we have some optional output below. We don't want the
  4254     // output from the concurrent mark thread interfering with this
  4255     // logging output either.
  4257     _hrs.verify_optional();
  4258     verify_region_sets_optional();
  4260     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  4261     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  4263     print_heap_after_gc();
  4264     trace_heap_after_gc(_gc_tracer_stw);
  4266     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  4267     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  4268     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  4269     // before any GC notifications are raised.
  4270     g1mm()->update_sizes();
  4272     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
  4273     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
  4274     _gc_timer_stw->register_gc_end();
  4275     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
  4276   }
  4277   // It should now be safe to tell the concurrent mark thread to start
  4278   // without its logging output interfering with the logging output
  4279   // that came from the pause.
  4281   if (should_start_conc_mark) {
  4282     // CAUTION: after the doConcurrentMark() call below,
  4283     // the concurrent marking thread(s) could be running
  4284     // concurrently with us. Make sure that anything after
  4285     // this point does not assume that we are the only GC thread
  4286     // running. Note: of course, the actual marking work will
  4287     // not start until the safepoint itself is released in
  4288     // ConcurrentGCThread::safepoint_desynchronize().
  4289     doConcurrentMark();
  4290   }
  4292   return true;
  4293 }
  4295 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  4296 {
  4297   size_t gclab_word_size;
  4298   switch (purpose) {
  4299     case GCAllocForSurvived:
  4300       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
  4301       break;
  4302     case GCAllocForTenured:
  4303       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4304       break;
  4305     default:
  4306       assert(false, "unknown GCAllocPurpose");
  4307       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4308       break;
  4309   }
  4311   // Prevent humongous PLAB sizes for two reasons:
  4312   // * PLABs are allocated using similar paths as oops, but should
  4313   //   never be in a humongous region
  4314   // * Allowing humongous PLABs needlessly churns the region free lists
  4315   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
  4316 }
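A standalone sketch of the clamping idea above: the adaptively resized PLAB request is capped below the humongous threshold so a PLAB can never itself require a humongous allocation. The constants are illustrative, not G1's:

// Editorial sketch, not part of this file.
#include <algorithm>
#include <cstddef>

const size_t region_size_words   = 1024 * 1024;
const size_t humongous_threshold = region_size_words / 2;

size_t desired_plab_size(size_t adaptive_estimate_words) {
  // A PLAB at or above half a region would have to be allocated as a
  // humongous object; cap the request below that threshold.
  return std::min(humongous_threshold, adaptive_estimate_words);
}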
  4318 void G1CollectedHeap::init_mutator_alloc_region() {
  4319   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  4320   _mutator_alloc_region.init();
  4321 }
  4323 void G1CollectedHeap::release_mutator_alloc_region() {
  4324   _mutator_alloc_region.release();
  4325   assert(_mutator_alloc_region.get() == NULL, "post-condition");
  4326 }
  4328 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  4329   assert_at_safepoint(true /* should_be_vm_thread */);
  4331   _survivor_gc_alloc_region.init();
  4332   _old_gc_alloc_region.init();
  4333   HeapRegion* retained_region = _retained_old_gc_alloc_region;
  4334   _retained_old_gc_alloc_region = NULL;
  4336   // We will discard the current GC alloc region if:
  4337   // a) it's in the collection set (it can happen!),
  4338   // b) it's already full (no point in using it),
  4339   // c) it's empty (this means that it was emptied during
  4340   // a cleanup and it should be on the free list now), or
  4341   // d) it's humongous (this means that it was emptied
  4342   // during a cleanup and was added to the free list, but
  4343   // has been subsequently used to allocate a humongous
  4344   // object that may be less than the region size).
  4345   if (retained_region != NULL &&
  4346       !retained_region->in_collection_set() &&
  4347       !(retained_region->top() == retained_region->end()) &&
  4348       !retained_region->is_empty() &&
  4349       !retained_region->isHumongous()) {
  4350     retained_region->set_saved_mark();
  4351     // The retained region was added to the old region set when it was
  4352     // retired. We have to remove it now, since we don't allow regions
  4353     // we allocate to in the region sets. We'll re-add it later, when
  4354     // it's retired again.
  4355     _old_set.remove(retained_region);
  4356     bool during_im = g1_policy()->during_initial_mark_pause();
  4357     retained_region->note_start_of_copying(during_im);
  4358     _old_gc_alloc_region.set(retained_region);
  4359     _hr_printer.reuse(retained_region);
  4360     evacuation_info.set_alloc_regions_used_before(retained_region->used());
  4361   }
  4362 }
  4364 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  4365   evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
  4366                                          _old_gc_alloc_region.count());
  4367   _survivor_gc_alloc_region.release();
  4368   // If we have an old GC alloc region to release, we'll save it in
  4369   // _retained_old_gc_alloc_region. If we don't,
  4370   // _retained_old_gc_alloc_region will become NULL. This is what we
  4371   // want either way so no reason to check explicitly for either
  4372   // condition.
  4373   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
  4375   if (ResizePLAB) {
  4376     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  4377     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  4378   }
  4379 }
  4381 void G1CollectedHeap::abandon_gc_alloc_regions() {
  4382   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
  4383   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
  4384   _retained_old_gc_alloc_region = NULL;
  4385 }
  4387 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  4388   _drain_in_progress = false;
  4389   set_evac_failure_closure(cl);
  4390   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  4391 }
  4393 void G1CollectedHeap::finalize_for_evac_failure() {
  4394   assert(_evac_failure_scan_stack != NULL &&
  4395          _evac_failure_scan_stack->length() == 0,
  4396          "Postcondition");
  4397   assert(!_drain_in_progress, "Postcondition");
  4398   delete _evac_failure_scan_stack;
  4399   _evac_failure_scan_stack = NULL;
  4400 }
  4402 void G1CollectedHeap::remove_self_forwarding_pointers() {
  4403   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4405   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
  4407   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4408     set_par_threads();
  4409     workers()->run_task(&rsfp_task);
  4410     set_par_threads(0);
  4411   } else {
  4412     rsfp_task.work(0);
  4413   }
  4415   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
  4417   // Reset the claim values in the regions in the collection set.
  4418   reset_cset_heap_region_claim_values();
  4420   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4422   // Now restore saved marks, if any.
  4423   assert(_objs_with_preserved_marks.size() ==
  4424             _preserved_marks_of_objs.size(), "Both or none.");
  4425   while (!_objs_with_preserved_marks.is_empty()) {
  4426     oop obj = _objs_with_preserved_marks.pop();
  4427     markOop m = _preserved_marks_of_objs.pop();
  4428     obj->set_mark(m);
  4429   }
  4430   _objs_with_preserved_marks.clear(true);
  4431   _preserved_marks_of_objs.clear(true);
  4432 }
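The loop above pops the two stacks in lock-step to re-install each preserved mark word in its (self-forwarded) object. A standalone sketch of that paired-stack protocol, with Object and Mark as stand-ins for oop and markOop:

// Editorial sketch, not part of this file.
#include <cassert>
#include <vector>

typedef void*    Object;
typedef unsigned Mark;

struct PreservedMarks {
  std::vector<Object> _objs;
  std::vector<Mark>   _marks;

  void preserve(Object obj, Mark m) {
    _objs.push_back(obj);
    _marks.push_back(m);          // same index as obj: "both or none"
  }
  void restore_all(void (*set_mark)(Object, Mark)) {
    assert(_objs.size() == _marks.size() && "Both or none.");
    while (!_objs.empty()) {
      set_mark(_objs.back(), _marks.back());
      _objs.pop_back();
      _marks.pop_back();
    }
  }
};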
  4434 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  4435   _evac_failure_scan_stack->push(obj);
  4436 }
  4438 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  4439   assert(_evac_failure_scan_stack != NULL, "precondition");
  4441   while (_evac_failure_scan_stack->length() > 0) {
  4442      oop obj = _evac_failure_scan_stack->pop();
  4443      _evac_failure_closure->set_region(heap_region_containing(obj));
  4444      obj->oop_iterate_backwards(_evac_failure_closure);
  4445   }
  4446 }
  4448 oop
  4449 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
  4450                                                oop old) {
  4451   assert(obj_in_cs(old),
  4452          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
  4453                  (HeapWord*) old));
  4454   markOop m = old->mark();
  4455   oop forward_ptr = old->forward_to_atomic(old);
  4456   if (forward_ptr == NULL) {
  4457     // Forward-to-self succeeded.
  4458     assert(_par_scan_state != NULL, "par scan state");
  4459     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4460     uint queue_num = _par_scan_state->queue_num();
  4462     _evacuation_failed = true;
  4463     _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
  4464     if (_evac_failure_closure != cl) {
  4465       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  4466       assert(!_drain_in_progress,
  4467              "Should only be true while someone holds the lock.");
  4468       // Set the global evac-failure closure to the current thread's.
  4469       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  4470       set_evac_failure_closure(cl);
  4471       // Now do the common part.
  4472       handle_evacuation_failure_common(old, m);
  4473       // Reset to NULL.
  4474       set_evac_failure_closure(NULL);
  4475     } else {
  4476       // The lock is already held, and this is recursive.
  4477       assert(_drain_in_progress, "This should only be the recursive case.");
  4478       handle_evacuation_failure_common(old, m);
  4479     }
  4480     return old;
  4481   } else {
  4482     // Forward-to-self failed. Either someone else managed to allocate
  4483     // space for this object (old != forward_ptr) or they beat us in
  4484     // self-forwarding it (old == forward_ptr).
  4485     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
  4486            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
  4487                    "should not be in the CSet",
  4488                    (HeapWord*) old, (HeapWord*) forward_ptr));
  4489     return forward_ptr;
  4490   }
  4491 }
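forward_to_atomic() above is a compare-and-swap that either installs our forwarding pointer (returning NULL) or reports what another thread already installed, which is how the outcomes above are distinguished. A simplified standalone sketch using an explicit forwarding slot rather than HotSpot's mark word (names are hypothetical):

// Editorial sketch, not part of this file.
#include <atomic>

struct ObjHeader {
  std::atomic<ObjHeader*> _forwardee{nullptr};

  ObjHeader* forward_to_atomic(ObjHeader* new_location) {
    ObjHeader* expected = nullptr;
    if (_forwardee.compare_exchange_strong(expected, new_location)) {
      return nullptr;        // success: we installed the forwardee
    }
    return expected;         // failure: someone else got there first
  }
};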
  4493 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  4494   preserve_mark_if_necessary(old, m);
  4496   HeapRegion* r = heap_region_containing(old);
  4497   if (!r->evacuation_failed()) {
  4498     r->set_evacuation_failed(true);
  4499     _hr_printer.evac_failure(r);
  4500   }
  4502   push_on_evac_failure_scan_stack(old);
  4504   if (!_drain_in_progress) {
  4505     // prevent recursion in copy_to_survivor_space()
  4506     _drain_in_progress = true;
  4507     drain_evac_failure_scan_stack();
  4508     _drain_in_progress = false;
  4509   }
  4510 }
  4512 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  4513   assert(evacuation_failed(), "Oversaving!");
  4514   // We want to call the "for_promotion_failure" version only in the
  4515   // case of a promotion failure.
  4516   if (m->must_be_preserved_for_promotion_failure(obj)) {
  4517     _objs_with_preserved_marks.push(obj);
  4518     _preserved_marks_of_objs.push(m);
  4519   }
  4520 }
  4522 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4523                                                   size_t word_size) {
  4524   if (purpose == GCAllocForSurvived) {
  4525     HeapWord* result = survivor_attempt_allocation(word_size);
  4526     if (result != NULL) {
  4527       return result;
  4528     } else {
  4529       // Let's try to allocate in the old gen in case we can fit the
  4530       // object there.
  4531       return old_attempt_allocation(word_size);
  4532     }
  4533   } else {
  4534     assert(purpose == GCAllocForTenured, "sanity");
  4535     HeapWord* result = old_attempt_allocation(word_size);
  4536     if (result != NULL) {
  4537       return result;
  4538     } else {
  4539       // Let's try to allocate in the survivors in case we can fit the
  4540       // object there.
  4541       return survivor_attempt_allocation(word_size);
  4542     }
  4543   }
  4545   ShouldNotReachHere();
  4546   // Trying to keep some compilers happy.
  4547   return NULL;
  4548 }
  4550 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  4551   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
  4553 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
  4554   : _g1h(g1h),
  4555     _refs(g1h->task_queue(queue_num)),
  4556     _dcq(&g1h->dirty_card_queue_set()),
  4557     _ct_bs(g1h->g1_barrier_set()),
  4558     _g1_rem(g1h->g1_rem_set()),
  4559     _hash_seed(17), _queue_num(queue_num),
  4560     _term_attempts(0),
  4561     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  4562     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
  4563     _age_table(false), _scanner(g1h, this, rp),
  4564     _strong_roots_time(0), _term_time(0),
  4565     _alloc_buffer_waste(0), _undo_waste(0) {
  4566   // We allocate one entry more than the number of young CSet
  4567   // regions, since we "sacrifice" entry 0 to keep track of
  4568   // surviving bytes for non-young regions (where the age is -1).
  4569   // We also add a few elements at the beginning and at the end in
  4570   // an attempt to eliminate cache contention.
  4571   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  4572   uint array_length = PADDING_ELEM_NUM +
  4573                       real_length +
  4574                       PADDING_ELEM_NUM;
  4575   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  4576   if (_surviving_young_words_base == NULL)
  4577     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
  4578                           "Not enough space for young surv histo.");
  4579   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  4580   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
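A standalone sketch of the padding scheme above: the usable per-age array is surrounded by dummy elements so that arrays owned by different GC threads are unlikely to share cache lines. PADDING_ELEMS and the cache-line size are illustrative assumptions:

// Editorial sketch, not part of this file.
#include <cstring>
#include <cstddef>

const size_t CACHE_LINE_BYTES = 64;
const size_t PADDING_ELEMS    = CACHE_LINE_BYTES / sizeof(size_t);

struct SurvivingYoungWords {
  size_t* _base;   // includes the padding
  size_t* _words;  // entry 0 is "sacrificed" for non-young (age -1)

  explicit SurvivingYoungWords(size_t young_regions) {
    size_t real_length  = 1 + young_regions;
    size_t array_length = PADDING_ELEMS + real_length + PADDING_ELEMS;
    _base  = new size_t[array_length];
    _words = _base + PADDING_ELEMS;
    memset(_words, 0, real_length * sizeof(size_t));
  }
  ~SurvivingYoungWords() { delete[] _base; }
};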
  4582   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  4583   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  4585   _start = os::elapsedTime();
  4586 }
  4588 void
  4589 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  4590 {
  4591   st->print_raw_cr("GC Termination Stats");
  4592   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  4593                    " ------waste (KiB)------");
  4594   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  4595                    "  total   alloc    undo");
  4596   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  4597                    " ------- ------- -------");
  4598 }
  4600 void
  4601 G1ParScanThreadState::print_termination_stats(int i,
  4602                                               outputStream* const st) const
  4603 {
  4604   const double elapsed_ms = elapsed_time() * 1000.0;
  4605   const double s_roots_ms = strong_roots_time() * 1000.0;
  4606   const double term_ms    = term_time() * 1000.0;
  4607   st->print_cr("%3d %9.2f %9.2f %6.2f "
  4608                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
  4609                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
  4610                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
  4611                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
  4612                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
  4613                alloc_buffer_waste() * HeapWordSize / K,
  4614                undo_waste() * HeapWordSize / K);
  4615 }
  4617 #ifdef ASSERT
  4618 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  4619   assert(ref != NULL, "invariant");
  4620   assert(UseCompressedOops, "sanity");
  4621   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  4622   oop p = oopDesc::load_decode_heap_oop(ref);
  4623   assert(_g1h->is_in_g1_reserved(p),
  4624          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
  4625   return true;
  4626 }
  4628 bool G1ParScanThreadState::verify_ref(oop* ref) const {
  4629   assert(ref != NULL, "invariant");
  4630   if (has_partial_array_mask(ref)) {
  4631     // Must be in the collection set--it's already been copied.
  4632     oop p = clear_partial_array_mask(ref);
  4633     assert(_g1h->obj_in_cs(p),
  4634            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
  4635   } else {
  4636     oop p = oopDesc::load_decode_heap_oop(ref);
  4637     assert(_g1h->is_in_g1_reserved(p),
  4638            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
  4639   }
  4640   return true;
  4641 }
  4643 bool G1ParScanThreadState::verify_task(StarTask ref) const {
  4644   if (ref.is_narrow()) {
  4645     return verify_ref((narrowOop*) ref);
  4646   } else {
  4647     return verify_ref((oop*) ref);
  4648   }
  4649 }
  4650 #endif // ASSERT
  4652 void G1ParScanThreadState::trim_queue() {
  4653   assert(_evac_cl != NULL, "not set");
  4654   assert(_evac_failure_cl != NULL, "not set");
  4655   assert(_partial_scan_cl != NULL, "not set");
  4657   StarTask ref;
  4658   do {
  4659     // Drain the overflow stack first, so other threads can steal.
  4660     while (refs()->pop_overflow(ref)) {
  4661       deal_with_reference(ref);
  4662     }
  4664     while (refs()->pop_local(ref)) {
  4665       deal_with_reference(ref);
  4666     }
  4667   } while (!refs()->is_empty());
  4668 }
  4670 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
  4671                                      G1ParScanThreadState* par_scan_state) :
  4672   _g1(g1), _par_scan_state(par_scan_state),
  4673   _worker_id(par_scan_state->queue_num()) { }
  4675 void G1ParCopyHelper::mark_object(oop obj) {
  4676 #ifdef ASSERT
  4677   HeapRegion* hr = _g1->heap_region_containing(obj);
  4678   assert(hr != NULL, "sanity");
  4679   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  4680 #endif // ASSERT
  4682   // We know that the object is not moving so it's safe to read its size.
  4683   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  4684 }
  4686 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  4687 #ifdef ASSERT
  4688   assert(from_obj->is_forwarded(), "from obj should be forwarded");
  4689   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  4690   assert(from_obj != to_obj, "should not be self-forwarded");
  4692   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
  4693   assert(from_hr != NULL, "sanity");
  4694   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
  4696   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
  4697   assert(to_hr != NULL, "sanity");
  4698   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
  4699 #endif // ASSERT
  4701   // The object might be in the process of being copied by another
  4702   // worker so we cannot trust that its to-space image is
  4703   // well-formed. So we have to read its size from its from-space
  4704   // image which we know should not be changing.
  4705   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
  4706 }
  4708 oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
  4709   size_t word_sz = old->size();
  4710   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
  4711   // +1 to make the -1 indexes valid...
  4712   int       young_index = from_region->young_index_in_cset()+1;
  4713   assert( (from_region->is_young() && young_index >  0) ||
  4714          (!from_region->is_young() && young_index == 0), "invariant" );
  4715   G1CollectorPolicy* g1p = _g1h->g1_policy();
  4716   markOop m = old->mark();
  4717   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4718                                            : m->age();
  4719   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4720                                                              word_sz);
  4721   HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
  4722 #ifndef PRODUCT
  4723   // Should this evacuation fail?
  4724   if (_g1h->evacuation_should_fail()) {
  4725     if (obj_ptr != NULL) {
  4726       undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4727       obj_ptr = NULL;
  4728     }
  4729   }
  4730 #endif // !PRODUCT
  4732   if (obj_ptr == NULL) {
  4733     // This will either forward-to-self, or detect that someone else has
  4734     // installed a forwarding pointer.
  4735     return _g1h->handle_evacuation_failure_par(this, old);
  4736   }
  4738   oop obj = oop(obj_ptr);
  4740   // We're going to allocate linearly, so might as well prefetch ahead.
  4741   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  4743   oop forward_ptr = old->forward_to_atomic(obj);
  4744   if (forward_ptr == NULL) {
  4745     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4747     // alloc_purpose is just a hint to allocate() above, recheck the type of region
  4748     // we actually allocated from and update alloc_purpose accordingly
  4749     HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
  4750     alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
  4752     if (g1p->track_object_age(alloc_purpose)) {
  4753       // We could simply do obj->incr_age(). However, this causes a
  4754       // performance issue. obj->incr_age() will first check whether
  4755       // the object has a displaced mark by checking its mark word;
  4756       // getting the mark word from the new location of the object
  4757       // stalls. So, given that we already have the mark word and we
  4758       // are about to install it anyway, it's better to increase the
  4759       // age on the mark word, when the object does not have a
  4760       // displaced mark word. We're not expecting many objects to have
  4761       // a displaced mark word, so that case is not optimized
  4762       // further (it could be...) and we simply call obj->incr_age().
  4764       if (m->has_displaced_mark_helper()) {
  4765         // in this case, we have to install the mark word first,
  4766         // otherwise obj looks to be forwarded (the old mark word,
  4767         // which contains the forward pointer, was copied)
  4768         obj->set_mark(m);
  4769         obj->incr_age();
  4770       } else {
  4771         m = m->incr_age();
  4772         obj->set_mark(m);
  4773       }
  4774       age_table()->add(obj, word_sz);
  4775     } else {
  4776       obj->set_mark(m);
  4777     }
  4779     size_t* surv_young_words = surviving_young_words();
  4780     surv_young_words[young_index] += word_sz;
  4782     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4783       // We keep track of the next start index in the length field of
  4784       // the to-space object. The actual length can be found in the
  4785       // length field of the from-space object.
  4786       arrayOop(obj)->set_length(0);
  4787       oop* old_p = set_partial_array_mask(old);
  4788       push_on_queue(old_p);
  4789     } else {
  4790       // No point in using the slower heap_region_containing() method,
  4791       // given that we know obj is in the heap.
  4792       _scanner.set_region(_g1h->heap_region_containing_raw(obj));
  4793       obj->oop_iterate_backwards(&_scanner);
  4794     }
  4795   } else {
  4796     undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4797     obj = forward_ptr;
  4798   }
  4799   return obj;
  4800 }
  4802 template <class T>
  4803 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  4804   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
  4805     _scanned_klass->record_modified_oops();
  4806   }
  4807 }
  4809 template <G1Barrier barrier, bool do_mark_object>
  4810 template <class T>
  4811 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  4812   T heap_oop = oopDesc::load_heap_oop(p);
  4814   if (oopDesc::is_null(heap_oop)) {
  4815     return;
  4816   }
  4818   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  4820   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  4822   if (_g1->in_cset_fast_test(obj)) {
  4823     oop forwardee;
  4824     if (obj->is_forwarded()) {
  4825       forwardee = obj->forwardee();
  4826     } else {
  4827       forwardee = _par_scan_state->copy_to_survivor_space(obj);
  4829     assert(forwardee != NULL, "forwardee should not be NULL");
  4830     oopDesc::encode_store_heap_oop(p, forwardee);
  4831     if (do_mark_object && forwardee != obj) {
  4832       // If the object is self-forwarded we don't need to explicitly
  4833       // mark it, the evacuation failure protocol will do so.
  4834       mark_forwarded_object(obj, forwardee);
  4835     }
  4837     if (barrier == G1BarrierKlass) {
  4838       do_klass_barrier(p, forwardee);
  4839     }
  4840   } else {
  4841     // The object is not in collection set. If we're a root scanning
  4842     // closure during an initial mark pause (i.e. do_mark_object will
  4843     // be true) then attempt to mark the object.
  4844     if (do_mark_object) {
  4845       mark_object(obj);
  4846     }
  4847   }
  4849   if (barrier == G1BarrierEvac) {
  4850     _par_scan_state->update_rs(_from, p, _worker_id);
  4851   }
  4852 }
  4854 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
  4855 template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  4857 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  4858   assert(has_partial_array_mask(p), "invariant");
  4859   oop from_obj = clear_partial_array_mask(p);
  4861   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  4862   assert(from_obj->is_objArray(), "must be obj array");
  4863   objArrayOop from_obj_array = objArrayOop(from_obj);
  4864   // The from-space object contains the real length.
  4865   int length                 = from_obj_array->length();
  4867   assert(from_obj->is_forwarded(), "must be forwarded");
  4868   oop to_obj                 = from_obj->forwardee();
  4869   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  4870   objArrayOop to_obj_array   = objArrayOop(to_obj);
  4871   // We keep track of the next start index in the length field of the
  4872   // to-space object.
  4873   int next_index             = to_obj_array->length();
  4874   assert(0 <= next_index && next_index < length,
  4875          err_msg("invariant, next index: %d, length: %d", next_index, length));
  4877   int start                  = next_index;
  4878   int end                    = length;
  4879   int remainder              = end - start;
  4880   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  4881   if (remainder > 2 * ParGCArrayScanChunk) {
  4882     end = start + ParGCArrayScanChunk;
  4883     to_obj_array->set_length(end);
  4884     // Push the remainder before we process the range in case another
  4885     // worker has run out of things to do and can steal it.
  4886     oop* from_obj_p = set_partial_array_mask(from_obj);
  4887     _par_scan_state->push_on_queue(from_obj_p);
  4888   } else {
  4889     assert(length == end, "sanity");
  4890     // We'll process the final range for this object. Restore the length
  4891     // so that the heap remains parsable in case of evacuation failure.
  4892     to_obj_array->set_length(end);
  4893   }
  4894   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
  4895   // Process indexes [start,end). It will also process the header
  4896   // along with the first chunk (i.e., the chunk with start == 0).
  4897   // Note that at this point the length field of to_obj_array is not
  4898   // correct given that we are using it to keep track of the next
  4899   // start index. oop_iterate_range() (thankfully!) ignores the length
  4900   // field and only relies on the start / end parameters.  It does
  4901   // however return the size of the object which will be incorrect. So
  4902   // we have to ignore it even if we wanted to use it.
  4903   to_obj_array->oop_iterate_range(&_scanner, start, end);
  4904 }
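A standalone sketch of the chunking protocol above, using an explicit cursor field instead of borrowing the to-space length field; ChunkSize and the queue type are illustrative choices:

// Editorial sketch, not part of this file.
#include <deque>
#include <vector>

const int ChunkSize = 64;

struct ArrayTask { std::vector<int>* array; int next_index; };

void process_array_in_chunks(ArrayTask task,
                             std::deque<ArrayTask>& queue,
                             void (*scan)(int)) {
  int start = task.next_index;
  int end   = (int) task.array->size();
  // Avoid pushing a final sliver: only split if a chunk-sized piece
  // still leaves a worthwhile remainder.
  if (end - start > 2 * ChunkSize) {
    end = start + ChunkSize;
    // Push the remainder first so an idle worker can steal it while
    // we scan [start, end).
    queue.push_back(ArrayTask{task.array, end});
  }
  for (int i = start; i < end; i++) {
    scan((*task.array)[i]);
  }
}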
  4906 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4907 protected:
  4908   G1CollectedHeap*              _g1h;
  4909   G1ParScanThreadState*         _par_scan_state;
  4910   RefToScanQueueSet*            _queues;
  4911   ParallelTaskTerminator*       _terminator;
  4913   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4914   RefToScanQueueSet*      queues()         { return _queues; }
  4915   ParallelTaskTerminator* terminator()     { return _terminator; }
  4917 public:
  4918   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4919                                 G1ParScanThreadState* par_scan_state,
  4920                                 RefToScanQueueSet* queues,
  4921                                 ParallelTaskTerminator* terminator)
  4922     : _g1h(g1h), _par_scan_state(par_scan_state),
  4923       _queues(queues), _terminator(terminator) {}
  4925   void do_void();
  4927 private:
  4928   inline bool offer_termination();
  4929 };
  4931 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4932   G1ParScanThreadState* const pss = par_scan_state();
  4933   pss->start_term_time();
  4934   const bool res = terminator()->offer_termination();
  4935   pss->end_term_time();
  4936   return res;
  4937 }
  4939 void G1ParEvacuateFollowersClosure::do_void() {
  4940   StarTask stolen_task;
  4941   G1ParScanThreadState* const pss = par_scan_state();
  4942   pss->trim_queue();
  4944   do {
  4945     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  4946       assert(pss->verify_task(stolen_task), "sanity");
  4947       if (stolen_task.is_narrow()) {
  4948         pss->deal_with_reference((narrowOop*) stolen_task);
  4949       } else {
  4950         pss->deal_with_reference((oop*) stolen_task);
  4951       }
  4953       // We've just processed a reference and we might have made
  4954       // available new entries on the queues. So we have to make sure
  4955       // we drain the queues as necessary.
  4956       pss->trim_queue();
  4957     }
  4958   } while (!offer_termination());
  4960   pss->retire_alloc_buffers();
  4961 }
  4963 class G1KlassScanClosure : public KlassClosure {
  4964  G1ParCopyHelper* _closure;
  4965  bool             _process_only_dirty;
  4966  int              _count;
  4967  public:
  4968   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
  4969       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
  4970   void do_klass(Klass* klass) {
  4971     // If the klass has not been dirtied we know that there are
  4972     // no references into the young gen and we can skip it.
  4973     if (!_process_only_dirty || klass->has_modified_oops()) {
  4974       // Clean the klass since we're going to scavenge all the metadata.
  4975       klass->clear_modified_oops();
  4977       // Tell the closure that this klass is the Klass to scavenge
  4978       // and is the one to dirty if oops are left pointing into the young gen.
  4979       _closure->set_scanned_klass(klass);
  4981       klass->oops_do(_closure);
  4983       _closure->set_scanned_klass(NULL);
  4984     }
  4985     _count++;
  4986   }
  4987 };
  4989 class G1ParTask : public AbstractGangTask {
  4990 protected:
  4991   G1CollectedHeap*       _g1h;
  4992   RefToScanQueueSet      *_queues;
  4993   ParallelTaskTerminator _terminator;
  4994   uint _n_workers;
  4996   Mutex _stats_lock;
  4997   Mutex* stats_lock() { return &_stats_lock; }
  4999   size_t getNCards() {
  5000     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  5001       / G1BlockOffsetSharedArray::N_bytes;
  5002   }
  5004 public:
  5005   G1ParTask(G1CollectedHeap* g1h,
  5006             RefToScanQueueSet *task_queues)
  5007     : AbstractGangTask("G1 collection"),
  5008       _g1h(g1h),
  5009       _queues(task_queues),
  5010       _terminator(0, _queues),
  5011       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  5012   {}
  5014   RefToScanQueueSet* queues() { return _queues; }
  5016   RefToScanQueue *work_queue(int i) {
  5017     return queues()->queue(i);
  5018   }
  5020   ParallelTaskTerminator* terminator() { return &_terminator; }
  5022   virtual void set_for_termination(int active_workers) {
  5023     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
  5024     // in the young space (_par_seq_tasks) in the G1 heap
  5025     // for SequentialSubTasksDone.
  5026     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
  5027     // both of which need setting by set_n_termination().
  5028     _g1h->SharedHeap::set_n_termination(active_workers);
  5029     _g1h->set_n_termination(active_workers);
  5030     terminator()->reset_for_reuse(active_workers);
  5031     _n_workers = active_workers;
  5032   }
  5034   void work(uint worker_id) {
  5035     if (worker_id >= _n_workers) return;  // no work needed this round
  5037     double start_time_ms = os::elapsedTime() * 1000.0;
  5038     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
  5040     {
  5041       ResourceMark rm;
  5042       HandleMark   hm;
  5044       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  5046       G1ParScanThreadState            pss(_g1h, worker_id, rp);
  5047       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
  5048       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  5049       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
  5051       pss.set_evac_closure(&scan_evac_cl);
  5052       pss.set_evac_failure_closure(&evac_failure_cl);
  5053       pss.set_partial_scan_closure(&partial_scan_cl);
  5055       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
  5056       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
  5058       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
  5059       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
  5061       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
  5062       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
  5063       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
  5065       OopClosure*                    scan_root_cl = &only_scan_root_cl;
  5066       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
  5068       if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5069         // We also need to mark copied objects.
  5070         scan_root_cl = &scan_mark_root_cl;
  5071         scan_klasses_cl = &scan_mark_klasses_cl_s;
  5072       }
  5074       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  5076       // Don't scan the scavengable methods in the code cache as part
  5077       // of strong root scanning. The code roots that point into a
  5078       // region in the collection set are scanned when we scan the
  5079       // region's RSet.
  5080       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
  5082       pss.start_strong_roots();
  5083       _g1h->g1_process_strong_roots(/* is scavenging */ true,
  5084                                     SharedHeap::ScanningOption(so),
  5085                                     scan_root_cl,
  5086                                     &push_heap_rs_cl,
  5087                                     scan_klasses_cl,
  5088                                     worker_id);
  5089       pss.end_strong_roots();
  5091       {
  5092         double start = os::elapsedTime();
  5093         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  5094         evac.do_void();
  5095         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  5096         double term_ms = pss.term_time()*1000.0;
  5097         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
  5098         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
  5099       }
  5100       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  5101       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  5103       if (ParallelGCVerbose) {
  5104         MutexLocker x(stats_lock());
  5105         pss.print_termination_stats(worker_id);
  5106       }
  5108       assert(pss.refs()->is_empty(), "should be empty");
  5110       // Close the inner scope so that the ResourceMark and HandleMark
  5111       // destructors are executed here and are included as part of the
  5112       // "GC Worker Time".
  5113     }
  5115     double end_time_ms = os::elapsedTime() * 1000.0;
  5116     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
  5117   }
  5118 };
  5120 // *** Common G1 Evacuation Stuff
  5122 // This method is run in a GC worker.
  5124 void
  5125 G1CollectedHeap::
  5126 g1_process_strong_roots(bool is_scavenging,
  5127                         ScanningOption so,
  5128                         OopClosure* scan_non_heap_roots,
  5129                         OopsInHeapRegionClosure* scan_rs,
  5130                         G1KlassScanClosure* scan_klasses,
  5131                         int worker_i) {
  5133   // First scan the strong roots
  5134   double ext_roots_start = os::elapsedTime();
  5135   double closure_app_time_sec = 0.0;
  5137   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  5139   assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
  5140   // Walk the code cache/strong code roots w/o buffering, because StarTask
  5141   // cannot handle unaligned oop locations.
  5142   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
  5144   process_strong_roots(false, // no scoping; this is parallel code
  5145                        is_scavenging, so,
  5146                        &buf_scan_non_heap_roots,
  5147                        &eager_scan_code_roots,
  5148                        scan_klasses
  5149                        );
  5151   // Now the CM ref_processor roots.
  5152   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  5153     // We need to treat the discovered reference lists of the
  5154     // concurrent mark ref processor as roots and keep entries
  5155     // (which are added by the marking threads) on them live
  5156     // until they can be processed at the end of marking.
  5157     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  5158   }
  5160   // Finish up any enqueued closure apps (attributed as object copy time).
  5161   buf_scan_non_heap_roots.done();
  5163   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
  5165   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  5167   double ext_root_time_ms =
  5168     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  5170   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
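The subtraction above works because the buffered closure applications are timed separately and attributed to object copy. A standalone sketch of that buffer-then-apply accounting (BufferingClosure and all names are hypothetical; std::chrono stands in for os::elapsedTime()):

// Editorial sketch, not part of this file.
#include <chrono>
#include <vector>

struct BufferingClosure {
  std::vector<void*> _buffered;
  void (*_do_oop)(void*);
  double _apply_seconds = 0.0;

  explicit BufferingClosure(void (*f)(void*)) : _do_oop(f) {}
  void enqueue(void* p) { _buffered.push_back(p); }

  // Apply everything that was buffered, timing only the application.
  void done() {
    auto t0 = std::chrono::steady_clock::now();
    for (void* p : _buffered) _do_oop(p);
    _buffered.clear();
    std::chrono::duration<double> d = std::chrono::steady_clock::now() - t0;
    _apply_seconds += d.count();
  }
  double closure_app_seconds() const { return _apply_seconds; }
};
// A caller then reports: ext_root_ms = (total_elapsed - _apply_seconds) * 1000.0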
  5172   // During conc marking we have to filter the per-thread SATB buffers
  5173   // to make sure we remove any oops into the CSet (which will show up
  5174   // as implicitly live).
  5175   double satb_filtering_ms = 0.0;
  5176   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
  5177     if (mark_in_progress()) {
  5178       double satb_filter_start = os::elapsedTime();
  5180       JavaThread::satb_mark_queue_set().filter_thread_buffers();
  5182       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
  5183     }
  5184   }
  5185   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
  5187   // If this is an initial mark pause, and we're not scanning
  5188   // the entire code cache, we need to mark the oops in the
  5189   // strong code root lists for the regions that are not in
  5190   // the collection set.
  5191   // Note all threads participate in this set of root tasks.
  5192   double mark_strong_code_roots_ms = 0.0;
  5193   if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
  5194     double mark_strong_roots_start = os::elapsedTime();
  5195     mark_strong_code_roots(worker_i);
  5196     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
  5197   }
  5198   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
  5200   // Now scan the complement of the collection set.
  5201   if (scan_rs != NULL) {
  5202     g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
  5203   }
  5204   _process_strong_tasks->all_tasks_completed();
  5205 }
  5207 void
  5208 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
  5209   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  5210   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
  5211 }
  5213 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
  5214 private:
  5215   BoolObjectClosure* _is_alive;
  5216   int _initial_string_table_size;
  5217   int _initial_symbol_table_size;
  5219   bool  _process_strings;
  5220   int _strings_processed;
  5221   int _strings_removed;
  5223   bool  _process_symbols;
  5224   int _symbols_processed;
  5225   int _symbols_removed;
  5227   bool _do_in_parallel;
  5228 public:
  5229   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
  5230     AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
  5231     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
  5232     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
  5233     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
  5235     _initial_string_table_size = StringTable::the_table()->table_size();
  5236     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
  5237     if (process_strings) {
  5238       StringTable::clear_parallel_claimed_index();
  5239     }
  5240     if (process_symbols) {
  5241       SymbolTable::clear_parallel_claimed_index();
  5242     }
  5243   }
  5245   ~G1StringSymbolTableUnlinkTask() {
  5246     guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
  5247               err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
  5248                       StringTable::parallel_claimed_index(), _initial_string_table_size));
  5249     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
  5250               err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
  5251                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
  5252   }
  5254   void work(uint worker_id) {
  5255     if (_do_in_parallel) {
  5256       int strings_processed = 0;
  5257       int strings_removed = 0;
  5258       int symbols_processed = 0;
  5259       int symbols_removed = 0;
  5260       if (_process_strings) {
  5261         StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
  5262         Atomic::add(strings_processed, &_strings_processed);
  5263         Atomic::add(strings_removed, &_strings_removed);
  5264       }
  5265       if (_process_symbols) {
  5266         SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
  5267         Atomic::add(symbols_processed, &_symbols_processed);
  5268         Atomic::add(symbols_removed, &_symbols_removed);
  5269       }
  5270     } else {
  5271       if (_process_strings) {
  5272         StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
  5273       }
  5274       if (_process_symbols) {
  5275         SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
  5276       }
  5277     }
  5278   }
  5280   size_t strings_processed() const { return (size_t)_strings_processed; }
  5281   size_t strings_removed()   const { return (size_t)_strings_removed; }
  5283   size_t symbols_processed() const { return (size_t)_symbols_processed; }
  5284   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
  5285 };
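A standalone sketch of the counting discipline used in work() above: each worker accumulates into locals and publishes once with an atomic add, instead of contending on the shared counters per table entry. Names are hypothetical:

// Editorial sketch, not part of this file.
#include <atomic>

struct UnlinkStats {
  std::atomic<int> processed{0};
  std::atomic<int> removed{0};
};

void worker_unlink(UnlinkStats& stats, bool (*is_dead)(int),
                   const int* entries, int n) {
  int local_processed = 0;
  int local_removed   = 0;
  for (int i = 0; i < n; i++) {
    local_processed++;
    if (is_dead(entries[i])) local_removed++;
  }
  // One atomic update per worker, not per entry.
  stats.processed.fetch_add(local_processed);
  stats.removed.fetch_add(local_removed);
}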
  5287 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
  5288                                                      bool process_strings, bool process_symbols) {
  5289   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  5290                    _g1h->workers()->active_workers() : 1);
  5292   G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
  5293   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5294     set_par_threads(n_workers);
  5295     workers()->run_task(&g1_unlink_task);
  5296     set_par_threads(0);
  5297   } else {
  5298     g1_unlink_task.work(0);
  5299   }
  5300   if (G1TraceStringSymbolTableScrubbing) {
  5301     gclog_or_tty->print_cr("Cleaned string and symbol table, "
  5302                            "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
  5303                            "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
  5304                            g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
  5305                            g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
  5306   }
  5307 }
  5309 // Weak Reference Processing support
  5311 // An always "is_alive" closure that is used to preserve referents.
  5312 // If the object is non-null then it's alive.  Used in the preservation
  5313 // of referent objects that are pointed to by reference objects
  5314 // discovered by the CM ref processor.
  5315 class G1AlwaysAliveClosure: public BoolObjectClosure {
  5316   G1CollectedHeap* _g1;
  5317 public:
  5318   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5319   bool do_object_b(oop p) {
  5320     if (p != NULL) {
  5321       return true;
  5322     }
  5323     return false;
  5324   }
  5325 };
  5327 bool G1STWIsAliveClosure::do_object_b(oop p) {
  5328   // An object is reachable if it is outside the collection set,
  5329   // or is inside and copied.
  5330   return !_g1->obj_in_cs(p) || p->is_forwarded();
  5331 }
  5333 // Non Copying Keep Alive closure
  5334 class G1KeepAliveClosure: public OopClosure {
  5335   G1CollectedHeap* _g1;
  5336 public:
  5337   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5338   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  5339   void do_oop(      oop* p) {
  5340     oop obj = *p;
  5342     if (_g1->obj_in_cs(obj)) {
  5343       assert( obj->is_forwarded(), "invariant" );
  5344       *p = obj->forwardee();
  5345     }
  5346   }
  5347 };
  5349 // Copying Keep Alive closure - can be called from both
  5350 // serial and parallel code as long as different worker
  5351 // threads utilize different G1ParScanThreadState instances
  5352 // and different queues.
  5354 class G1CopyingKeepAliveClosure: public OopClosure {
  5355   G1CollectedHeap*         _g1h;
  5356   OopClosure*              _copy_non_heap_obj_cl;
  5357   OopsInHeapRegionClosure* _copy_metadata_obj_cl;
  5358   G1ParScanThreadState*    _par_scan_state;
  5360 public:
  5361   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
  5362                             OopClosure* non_heap_obj_cl,
  5363                             OopsInHeapRegionClosure* metadata_obj_cl,
  5364                             G1ParScanThreadState* pss):
  5365     _g1h(g1h),
  5366     _copy_non_heap_obj_cl(non_heap_obj_cl),
  5367     _copy_metadata_obj_cl(metadata_obj_cl),
  5368     _par_scan_state(pss)
  5369   {}
  5371   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  5372   virtual void do_oop(      oop* p) { do_oop_work(p); }
  5374   template <class T> void do_oop_work(T* p) {
  5375     oop obj = oopDesc::load_decode_heap_oop(p);
  5377     if (_g1h->obj_in_cs(obj)) {
  5378       // If the referent object has been forwarded (either copied
  5379       // to a new location or to itself in the event of an
  5380       // evacuation failure) then we need to update the reference
  5381       // field and, if both reference and referent are in the G1
  5382       // heap, update the RSet for the referent.
  5383       //
   5384       // If the referent has not been forwarded then we have to keep
   5385       // it alive by policy. Therefore we have to copy the referent.
   5386       //
   5387       // If the reference field is in the G1 heap then we can push
   5388       // it on the PSS queue. When the queue is drained (after each
   5389       // phase of reference processing) the object and its followers
   5390       // will be copied, the reference field set to point to the
   5391       // new location, and the RSet updated. Otherwise we need to
   5392       // use the non-heap or metadata closures directly to copy
   5393       // the referent object and update the pointer, while avoiding
   5394       // updating the RSet.
  5396       if (_g1h->is_in_g1_reserved(p)) {
  5397         _par_scan_state->push_on_queue(p);
  5398       } else {
  5399         assert(!ClassLoaderDataGraph::contains((address)p),
  5400                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
  5401                               PTR_FORMAT, p));
   5402         _copy_non_heap_obj_cl->do_oop(p);
  5406 };
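// The closure above makes one decision per reference field: fields that
// live inside the reserved G1 heap are deferred onto the per-thread PSS
// queue (draining the queue also fixes up RSets), while fields outside
// the heap are handed straight to the copying closure. A minimal sketch
// of that dispatch with hypothetical toy types (not HotSpot's), under
// "#if 0" so it takes no part in compilation:
#if 0
#include <deque>

struct ToyHeap {
  char* base;
  char* end;
  bool is_in_reserved(void* p) const {
    return p >= static_cast<void*>(base) && p < static_cast<void*>(end);
  }
};

void keep_alive(void** field, const ToyHeap& heap,
                std::deque<void**>& pss_queue,
                void (*copy_directly)(void**)) {
  if (heap.is_in_reserved(field)) {
    pss_queue.push_back(field);   // handled when the queue is drained
  } else {
    copy_directly(field);         // e.g. a root outside the heap
  }
}
#endif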
  5408 // Serial drain queue closure. Called as the 'complete_gc'
  5409 // closure for each discovered list in some of the
  5410 // reference processing phases.
  5412 class G1STWDrainQueueClosure: public VoidClosure {
  5413 protected:
  5414   G1CollectedHeap* _g1h;
  5415   G1ParScanThreadState* _par_scan_state;
  5417   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  5419 public:
  5420   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
  5421     _g1h(g1h),
  5422     _par_scan_state(pss)
  5423   { }
  5425   void do_void() {
  5426     G1ParScanThreadState* const pss = par_scan_state();
  5427     pss->trim_queue();
  5429 };
  5431 // Parallel Reference Processing closures
  5433 // Implementation of AbstractRefProcTaskExecutor for parallel reference
  5434 // processing during G1 evacuation pauses.
  5436 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  5437 private:
  5438   G1CollectedHeap*   _g1h;
  5439   RefToScanQueueSet* _queues;
  5440   FlexibleWorkGang*  _workers;
  5441   int                _active_workers;
  5443 public:
  5444   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
  5445                         FlexibleWorkGang* workers,
  5446                         RefToScanQueueSet *task_queues,
  5447                         int n_workers) :
  5448     _g1h(g1h),
  5449     _queues(task_queues),
  5450     _workers(workers),
  5451     _active_workers(n_workers)
  5453     assert(n_workers > 0, "shouldn't call this otherwise");
   5456   // Executes the given task using the heap's parallel GC worker threads.
  5457   virtual void execute(ProcessTask& task);
  5458   virtual void execute(EnqueueTask& task);
  5459 };
  5461 // Gang task for possibly parallel reference processing
  5463 class G1STWRefProcTaskProxy: public AbstractGangTask {
  5464   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  5465   ProcessTask&     _proc_task;
  5466   G1CollectedHeap* _g1h;
  5467   RefToScanQueueSet *_task_queues;
  5468   ParallelTaskTerminator* _terminator;
  5470 public:
  5471   G1STWRefProcTaskProxy(ProcessTask& proc_task,
  5472                      G1CollectedHeap* g1h,
  5473                      RefToScanQueueSet *task_queues,
  5474                      ParallelTaskTerminator* terminator) :
  5475     AbstractGangTask("Process reference objects in parallel"),
  5476     _proc_task(proc_task),
  5477     _g1h(g1h),
  5478     _task_queues(task_queues),
  5479     _terminator(terminator)
  5480   {}
  5482   virtual void work(uint worker_id) {
  5483     // The reference processing task executed by a single worker.
  5484     ResourceMark rm;
  5485     HandleMark   hm;
  5487     G1STWIsAliveClosure is_alive(_g1h);
  5489     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
  5491     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
  5492     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5493     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
  5495     pss.set_evac_closure(&scan_evac_cl);
  5496     pss.set_evac_failure_closure(&evac_failure_cl);
  5497     pss.set_partial_scan_closure(&partial_scan_cl);
  5499     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5500     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
  5502     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5503     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
  5505     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5506     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
  5508     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5509       // We also need to mark copied objects.
  5510       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5511       copy_metadata_cl = &copy_mark_metadata_cl;
  5514     // Keep alive closure.
  5515     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
  5517     // Complete GC closure
  5518     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
  5520     // Call the reference processing task's work routine.
  5521     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
  5523     // Note we cannot assert that the refs array is empty here as not all
  5524     // of the processing tasks (specifically phase2 - pp2_work) execute
  5525     // the complete_gc closure (which ordinarily would drain the queue) so
  5526     // the queue may not be empty.
  5528 };
  5530 // Driver routine for parallel reference processing.
  5531 // Creates an instance of the ref processing gang
  5532 // task and has the worker threads execute it.
  5533 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  5534   assert(_workers != NULL, "Need parallel worker threads.");
  5536   ParallelTaskTerminator terminator(_active_workers, _queues);
  5537   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
  5539   _g1h->set_par_threads(_active_workers);
  5540   _workers->run_task(&proc_task_proxy);
  5541   _g1h->set_par_threads(0);
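// The executor above adapts a ProcessTask to the work gang by wrapping
// it in an AbstractGangTask proxy; every gang member then calls the
// proxy's work() with its own worker id. A minimal standalone sketch of
// that proxy pattern using std::thread (toy names; guarded by "#if 0"):
#if 0
#include <functional>
#include <thread>
#include <vector>

// Runs proxy_work(worker_id) once per gang member and waits for all of
// them, mirroring run_task() on the work gang above.
void run_on_gang(unsigned n_workers,
                 const std::function<void(unsigned)>& proxy_work) {
  std::vector<std::thread> gang;
  for (unsigned id = 0; id < n_workers; ++id) {
    gang.emplace_back(proxy_work, id);
  }
  for (std::thread& t : gang) {
    t.join();
  }
}
#endif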
  5544 // Gang task for parallel reference enqueueing.
  5546 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
  5547   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  5548   EnqueueTask& _enq_task;
  5550 public:
  5551   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  5552     AbstractGangTask("Enqueue reference objects in parallel"),
  5553     _enq_task(enq_task)
  5554   { }
  5556   virtual void work(uint worker_id) {
  5557     _enq_task.work(worker_id);
  5559 };
  5561 // Driver routine for parallel reference enqueueing.
  5562 // Creates an instance of the ref enqueueing gang
  5563 // task and has the worker threads execute it.
  5565 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  5566   assert(_workers != NULL, "Need parallel worker threads.");
  5568   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
  5570   _g1h->set_par_threads(_active_workers);
  5571   _workers->run_task(&enq_task_proxy);
  5572   _g1h->set_par_threads(0);
  5575 // End of weak reference support closures
  5577 // Abstract task used to preserve (i.e. copy) any referent objects
  5578 // that are in the collection set and are pointed to by reference
  5579 // objects discovered by the CM ref processor.
  5581 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
  5582 protected:
  5583   G1CollectedHeap* _g1h;
  5584   RefToScanQueueSet      *_queues;
  5585   ParallelTaskTerminator _terminator;
  5586   uint _n_workers;
  5588 public:
   5589   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
  5590     AbstractGangTask("ParPreserveCMReferents"),
  5591     _g1h(g1h),
  5592     _queues(task_queues),
  5593     _terminator(workers, _queues),
  5594     _n_workers(workers)
  5595   { }
  5597   void work(uint worker_id) {
  5598     ResourceMark rm;
  5599     HandleMark   hm;
  5601     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
  5602     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
  5603     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5604     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
  5606     pss.set_evac_closure(&scan_evac_cl);
  5607     pss.set_evac_failure_closure(&evac_failure_cl);
  5608     pss.set_partial_scan_closure(&partial_scan_cl);
  5610     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  5613     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5614     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
  5616     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5617     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
  5619     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5620     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
  5622     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5623       // We also need to mark copied objects.
  5624       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5625       copy_metadata_cl = &copy_mark_metadata_cl;
  5628     // Is alive closure
  5629     G1AlwaysAliveClosure always_alive(_g1h);
  5631     // Copying keep alive closure. Applied to referent objects that need
  5632     // to be copied.
  5633     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
  5635     ReferenceProcessor* rp = _g1h->ref_processor_cm();
  5637     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
  5638     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
  5640     // limit is set using max_num_q() - which was set using ParallelGCThreads.
  5641     // So this must be true - but assert just in case someone decides to
  5642     // change the worker ids.
   5643     assert(worker_id < limit, "sanity");
  5644     assert(!rp->discovery_is_atomic(), "check this code");
  5646     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
  5647     for (uint idx = worker_id; idx < limit; idx += stride) {
  5648       DiscoveredList& ref_list = rp->discovered_refs()[idx];
  5650       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
  5651       while (iter.has_next()) {
  5652         // Since discovery is not atomic for the CM ref processor, we
  5653         // can see some null referent objects.
  5654         iter.load_ptrs(DEBUG_ONLY(true));
  5655         oop ref = iter.obj();
  5657         // This will filter nulls.
  5658         if (iter.is_referent_alive()) {
  5659           iter.make_referent_alive();
  5661         iter.move_to_next();
  5665     // Drain the queue - which may cause stealing
  5666     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
  5667     drain_queue.do_void();
  5668     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
  5669     assert(pss.refs()->is_empty(), "should be");
  5671 };
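// The loop above partitions the discovered lists by striding: worker i
// takes lists i, i + stride, i + 2*stride, ... so each list is visited
// by exactly one worker and no claiming protocol is needed. A minimal
// standalone sketch of that partitioning (toy names, under "#if 0"):
#if 0
#include <algorithm>
#include <cstdio>

void claim_lists(unsigned worker_id, unsigned n_workers, unsigned limit) {
  // Same stride computation as above: MIN2(MAX2(n_workers, 1U), limit).
  unsigned stride = std::min(std::max(n_workers, 1u), limit);
  // Workers whose id is >= limit simply claim nothing.
  for (unsigned idx = worker_id; idx < limit; idx += stride) {
    std::printf("worker %u handles list %u\n", worker_id, idx);
  }
}
#endif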
  5673 // Weak Reference processing during an evacuation pause (part 1).
  5674 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
  5675   double ref_proc_start = os::elapsedTime();
  5677   ReferenceProcessor* rp = _ref_processor_stw;
  5678   assert(rp->discovery_enabled(), "should have been enabled");
  5680   // Any reference objects, in the collection set, that were 'discovered'
  5681   // by the CM ref processor should have already been copied (either by
  5682   // applying the external root copy closure to the discovered lists, or
  5683   // by following an RSet entry).
  5684   //
  5685   // But some of the referents, that are in the collection set, that these
  5686   // reference objects point to may not have been copied: the STW ref
  5687   // processor would have seen that the reference object had already
  5688   // been 'discovered' and would have skipped discovering the reference,
  5689   // but would not have treated the reference object as a regular oop.
  5690   // As a result the copy closure would not have been applied to the
  5691   // referent object.
  5692   //
  5693   // We need to explicitly copy these referent objects - the references
  5694   // will be processed at the end of remarking.
  5695   //
  5696   // We also need to do this copying before we process the reference
  5697   // objects discovered by the STW ref processor in case one of these
  5698   // referents points to another object which is also referenced by an
  5699   // object discovered by the STW ref processor.
  5701   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
  5702            no_of_gc_workers == workers()->active_workers(),
  5703            "Need to reset active GC workers");
  5705   set_par_threads(no_of_gc_workers);
  5706   G1ParPreserveCMReferentsTask keep_cm_referents(this,
  5707                                                  no_of_gc_workers,
  5708                                                  _task_queues);
  5710   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5711     workers()->run_task(&keep_cm_referents);
  5712   } else {
  5713     keep_cm_referents.work(0);
  5716   set_par_threads(0);
  5718   // Closure to test whether a referent is alive.
  5719   G1STWIsAliveClosure is_alive(this);
  5721   // Even when parallel reference processing is enabled, the processing
  5722   // of JNI refs is serial and performed serially by the current thread
  5723   // rather than by a worker. The following PSS will be used for processing
  5724   // JNI refs.
  5726   // Use only a single queue for this PSS.
  5727   G1ParScanThreadState            pss(this, 0, NULL);
  5729   // We do not embed a reference processor in the copying/scanning
  5730   // closures while we're actually processing the discovered
  5731   // reference objects.
  5732   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
  5733   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
  5734   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
  5736   pss.set_evac_closure(&scan_evac_cl);
  5737   pss.set_evac_failure_closure(&evac_failure_cl);
  5738   pss.set_partial_scan_closure(&partial_scan_cl);
  5740   assert(pss.refs()->is_empty(), "pre-condition");
  5742   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
  5743   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
  5745   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  5746   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
  5748   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5749   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
   5751   if (g1_policy()->during_initial_mark_pause()) {
  5752     // We also need to mark copied objects.
  5753     copy_non_heap_cl = &copy_mark_non_heap_cl;
  5754     copy_metadata_cl = &copy_mark_metadata_cl;
  5757   // Keep alive closure.
  5758   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
  5760   // Serial Complete GC closure
  5761   G1STWDrainQueueClosure drain_queue(this, &pss);
  5763   // Setup the soft refs policy...
  5764   rp->setup_policy(false);
  5766   ReferenceProcessorStats stats;
  5767   if (!rp->processing_is_mt()) {
  5768     // Serial reference processing...
  5769     stats = rp->process_discovered_references(&is_alive,
  5770                                               &keep_alive,
  5771                                               &drain_queue,
  5772                                               NULL,
  5773                                               _gc_timer_stw);
  5774   } else {
  5775     // Parallel reference processing
  5776     assert(rp->num_q() == no_of_gc_workers, "sanity");
  5777     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  5779     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  5780     stats = rp->process_discovered_references(&is_alive,
  5781                                               &keep_alive,
  5782                                               &drain_queue,
  5783                                               &par_task_executor,
  5784                                               _gc_timer_stw);
  5787   _gc_tracer_stw->report_gc_reference_stats(stats);
  5788   // We have completed copying any necessary live referent objects
  5789   // (that were not copied during the actual pause) so we can
  5790   // retire any active alloc buffers
  5791   pss.retire_alloc_buffers();
  5792   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  5794   double ref_proc_time = os::elapsedTime() - ref_proc_start;
  5795   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
  5798 // Weak Reference processing during an evacuation pause (part 2).
  5799 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
  5800   double ref_enq_start = os::elapsedTime();
  5802   ReferenceProcessor* rp = _ref_processor_stw;
  5803   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
  5805   // Now enqueue any remaining on the discovered lists on to
  5806   // the pending list.
  5807   if (!rp->processing_is_mt()) {
  5808     // Serial reference processing...
  5809     rp->enqueue_discovered_references();
  5810   } else {
  5811     // Parallel reference enqueueing
  5813     assert(no_of_gc_workers == workers()->active_workers(),
  5814            "Need to reset active workers");
  5815     assert(rp->num_q() == no_of_gc_workers, "sanity");
  5816     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  5818     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  5819     rp->enqueue_discovered_references(&par_task_executor);
  5822   rp->verify_no_references_recorded();
  5823   assert(!rp->discovery_enabled(), "should have been disabled");
  5825   // FIXME
  5826   // CM's reference processing also cleans up the string and symbol tables.
  5827   // Should we do that here also? We could, but it is a serial operation
  5828   // and could significantly increase the pause time.
  5830   double ref_enq_time = os::elapsedTime() - ref_enq_start;
  5831   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
  5834 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
  5835   _expand_heap_after_alloc_failure = true;
  5836   _evacuation_failed = false;
  5838   // Should G1EvacuationFailureALot be in effect for this GC?
  5839   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
  5841   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  5843   // Disable the hot card cache.
  5844   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  5845   hot_card_cache->reset_hot_cache_claimed_index();
  5846   hot_card_cache->set_use_cache(false);
  5848   uint n_workers;
  5849   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5850     n_workers =
  5851       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  5852                                      workers()->active_workers(),
  5853                                      Threads::number_of_non_daemon_threads());
  5854     assert(UseDynamicNumberOfGCThreads ||
  5855            n_workers == workers()->total_workers(),
   5856            "If not dynamic should be using all the workers");
  5857     workers()->set_active_workers(n_workers);
  5858     set_par_threads(n_workers);
  5859   } else {
  5860     assert(n_par_threads() == 0,
  5861            "Should be the original non-parallel value");
  5862     n_workers = 1;
  5865   G1ParTask g1_par_task(this, _task_queues);
  5867   init_for_evac_failure(NULL);
  5869   rem_set()->prepare_for_younger_refs_iterate(true);
  5871   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  5872   double start_par_time_sec = os::elapsedTime();
  5873   double end_par_time_sec;
  5876     StrongRootsScope srs(this);
  5878     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5879       // The individual threads will set their evac-failure closures.
  5880       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
   5881       // These tasks use SharedHeap::_process_strong_tasks
  5882       assert(UseDynamicNumberOfGCThreads ||
  5883              workers()->active_workers() == workers()->total_workers(),
   5884              "If not dynamic should be using all the workers");
  5885       workers()->run_task(&g1_par_task);
  5886     } else {
  5887       g1_par_task.set_for_termination(n_workers);
  5888       g1_par_task.work(0);
  5890     end_par_time_sec = os::elapsedTime();
  5892     // Closing the inner scope will execute the destructor
  5893     // for the StrongRootsScope object. We record the current
  5894     // elapsed time before closing the scope so that time
  5895     // taken for the SRS destructor is NOT included in the
  5896     // reported parallel time.
  5899   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  5900   g1_policy()->phase_times()->record_par_time(par_time_ms);
  5902   double code_root_fixup_time_ms =
  5903         (os::elapsedTime() - end_par_time_sec) * 1000.0;
  5904   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
  5906   set_par_threads(0);
  5908   // Process any discovered reference objects - we have
  5909   // to do this _before_ we retire the GC alloc regions
  5910   // as we may have to copy some 'reachable' referent
  5911   // objects (and their reachable sub-graphs) that were
  5912   // not copied during the pause.
  5913   process_discovered_references(n_workers);
  5915   // Weak root processing.
  5917     G1STWIsAliveClosure is_alive(this);
  5918     G1KeepAliveClosure keep_alive(this);
  5919     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  5922   release_gc_alloc_regions(n_workers, evacuation_info);
  5923   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  5925   // Reset and re-enable the hot card cache.
  5926   // Note the counts for the cards in the regions in the
  5927   // collection set are reset when the collection set is freed.
  5928   hot_card_cache->reset_hot_cache();
  5929   hot_card_cache->set_use_cache(true);
  5931   // Migrate the strong code roots attached to each region in
  5932   // the collection set. Ideally we would like to do this
  5933   // after we have finished the scanning/evacuation of the
  5934   // strong code roots for a particular heap region.
  5935   migrate_strong_code_roots();
  5937   purge_code_root_memory();
  5939   if (g1_policy()->during_initial_mark_pause()) {
  5940     // Reset the claim values set during marking the strong code roots
  5941     reset_heap_region_claim_values();
  5944   finalize_for_evac_failure();
  5946   if (evacuation_failed()) {
  5947     remove_self_forwarding_pointers();
  5949     // Reset the G1EvacuationFailureALot counters and flags
  5950     // Note: the values are reset only when an actual
  5951     // evacuation failure occurs.
  5952     NOT_PRODUCT(reset_evacuation_should_fail();)
   5955   // Enqueue any references remaining on the STW
  5956   // reference processor's discovered lists. We need to do
  5957   // this after the card table is cleaned (and verified) as
  5958   // the act of enqueueing entries on to the pending list
  5959   // will log these updates (and dirty their associated
  5960   // cards). We need these updates logged to update any
  5961   // RSets.
  5962   enqueue_discovered_references(n_workers);
  5964   if (G1DeferredRSUpdate) {
  5965     RedirtyLoggedCardTableEntryFastClosure redirty;
  5966     dirty_card_queue_set().set_closure(&redirty);
  5967     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  5969     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  5970     dcq.merge_bufferlists(&dirty_card_queue_set());
  5971     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  5973   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  5976 void G1CollectedHeap::free_region(HeapRegion* hr,
  5977                                   FreeRegionList* free_list,
  5978                                   bool par,
  5979                                   bool locked) {
  5980   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  5981   assert(!hr->is_empty(), "the region should not be empty");
  5982   assert(free_list != NULL, "pre-condition");
  5984   // Clear the card counts for this region.
  5985   // Note: we only need to do this if the region is not young
  5986   // (since we don't refine cards in young regions).
  5987   if (!hr->is_young()) {
  5988     _cg1r->hot_card_cache()->reset_card_counts(hr);
  5990   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
  5991   free_list->add_as_head(hr);
  5994 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  5995                                      FreeRegionList* free_list,
  5996                                      bool par) {
  5997   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  5998   assert(free_list != NULL, "pre-condition");
  6000   size_t hr_capacity = hr->capacity();
  6001   // We need to read this before we make the region non-humongous,
  6002   // otherwise the information will be gone.
  6003   uint last_index = hr->last_hc_index();
  6004   hr->set_notHumongous();
  6005   free_region(hr, free_list, par);
  6007   uint i = hr->hrs_index() + 1;
  6008   while (i < last_index) {
  6009     HeapRegion* curr_hr = region_at(i);
  6010     assert(curr_hr->continuesHumongous(), "invariant");
  6011     curr_hr->set_notHumongous();
  6012     free_region(curr_hr, free_list, par);
  6013     i += 1;
  6017 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
  6018                                        const HeapRegionSetCount& humongous_regions_removed) {
  6019   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
  6020     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  6021     _old_set.bulk_remove(old_regions_removed);
  6022     _humongous_set.bulk_remove(humongous_regions_removed);
  6027 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
  6028   assert(list != NULL, "list can't be null");
  6029   if (!list->is_empty()) {
  6030     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  6031     _free_list.add_as_head(list);
  6035 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  6036   assert(_summary_bytes_used >= bytes,
  6037          err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
  6038                   _summary_bytes_used, bytes));
  6039   _summary_bytes_used -= bytes;
  6042 class G1ParCleanupCTTask : public AbstractGangTask {
  6043   G1SATBCardTableModRefBS* _ct_bs;
  6044   G1CollectedHeap* _g1h;
  6045   HeapRegion* volatile _su_head;
  6046 public:
  6047   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
  6048                      G1CollectedHeap* g1h) :
  6049     AbstractGangTask("G1 Par Cleanup CT Task"),
  6050     _ct_bs(ct_bs), _g1h(g1h) { }
  6052   void work(uint worker_id) {
  6053     HeapRegion* r;
   6054     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  6055       clear_cards(r);
  6059   void clear_cards(HeapRegion* r) {
  6060     // Cards of the survivors should have already been dirtied.
  6061     if (!r->is_survivor()) {
  6062       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  6065 };
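// The task above load-balances dynamically: instead of pre-partitioning
// the dirty regions, each worker repeatedly pops the next region off a
// shared list until the list is empty. A minimal sketch of that consumer
// loop (toy types; the VM's pop_dirty_cards_region() avoids a lock, a
// mutex just keeps this sketch short; guarded by "#if 0"):
#if 0
#include <mutex>
#include <vector>

struct Region { /* toy stand-in for HeapRegion */ };

struct DirtyRegionList {
  std::vector<Region*> regions;
  std::mutex lock;

  Region* pop() {
    std::lock_guard<std::mutex> guard(lock);
    if (regions.empty()) {
      return nullptr;
    }
    Region* r = regions.back();
    regions.pop_back();
    return r;
  }
};

void cleanup_worker(DirtyRegionList& list, void (*clear_cards)(Region*)) {
  Region* r;
  while ((r = list.pop()) != nullptr) {
    clear_cards(r);
  }
}
#endif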
  6067 #ifndef PRODUCT
  6068 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  6069   G1CollectedHeap* _g1h;
  6070   G1SATBCardTableModRefBS* _ct_bs;
  6071 public:
  6072   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
  6073     : _g1h(g1h), _ct_bs(ct_bs) { }
  6074   virtual bool doHeapRegion(HeapRegion* r) {
  6075     if (r->is_survivor()) {
  6076       _g1h->verify_dirty_region(r);
  6077     } else {
  6078       _g1h->verify_not_dirty_region(r);
  6080     return false;
  6082 };
  6084 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
  6085   // All of the region should be clean.
  6086   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6087   MemRegion mr(hr->bottom(), hr->end());
  6088   ct_bs->verify_not_dirty_region(mr);
  6091 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
  6092   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  6093   // dirty allocated blocks as they allocate them. The thread that
  6094   // retires each region and replaces it with a new one will do a
  6095   // maximal allocation to fill in [pre_dummy_top(),end()] but will
  6096   // not dirty that area (one less thing to have to do while holding
  6097   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  6098   // is dirty.
  6099   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6100   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  6101   if (hr->is_young()) {
  6102     ct_bs->verify_g1_young_region(mr);
  6103   } else {
  6104     ct_bs->verify_dirty_region(mr);
  6108 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
  6109   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6110   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
  6111     verify_dirty_region(hr);
  6115 void G1CollectedHeap::verify_dirty_young_regions() {
  6116   verify_dirty_young_list(_young_list->first_region());
  6118 #endif
  6120 void G1CollectedHeap::cleanUpCardTable() {
  6121   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6122   double start = os::elapsedTime();
  6125     // Iterate over the dirty cards region list.
  6126     G1ParCleanupCTTask cleanup_task(ct_bs, this);
  6128     if (G1CollectedHeap::use_parallel_gc_threads()) {
  6129       set_par_threads();
  6130       workers()->run_task(&cleanup_task);
  6131       set_par_threads(0);
  6132     } else {
  6133       while (_dirty_cards_region_list) {
  6134         HeapRegion* r = _dirty_cards_region_list;
  6135         cleanup_task.clear_cards(r);
  6136         _dirty_cards_region_list = r->get_next_dirty_cards_region();
  6137         if (_dirty_cards_region_list == r) {
  6138           // The last region.
  6139           _dirty_cards_region_list = NULL;
  6141         r->set_next_dirty_cards_region(NULL);
  6144 #ifndef PRODUCT
  6145     if (G1VerifyCTCleanup || VerifyAfterGC) {
  6146       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
  6147       heap_region_iterate(&cleanup_verifier);
  6149 #endif
  6152   double elapsed = os::elapsedTime() - start;
  6153   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
  6156 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
  6157   size_t pre_used = 0;
  6158   FreeRegionList local_free_list("Local List for CSet Freeing");
  6160   double young_time_ms     = 0.0;
  6161   double non_young_time_ms = 0.0;
   6163   // Since the collection set is a superset of the young list,
   6164   // all we need to do to clear the young list is clear its
   6165   // head and length, and unlink any young regions in the code below.
  6166   _young_list->clear();
  6168   G1CollectorPolicy* policy = g1_policy();
  6170   double start_sec = os::elapsedTime();
  6171   bool non_young = true;
  6173   HeapRegion* cur = cs_head;
  6174   int age_bound = -1;
  6175   size_t rs_lengths = 0;
  6177   while (cur != NULL) {
  6178     assert(!is_on_master_free_list(cur), "sanity");
  6179     if (non_young) {
  6180       if (cur->is_young()) {
  6181         double end_sec = os::elapsedTime();
  6182         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6183         non_young_time_ms += elapsed_ms;
  6185         start_sec = os::elapsedTime();
  6186         non_young = false;
  6188     } else {
  6189       if (!cur->is_young()) {
  6190         double end_sec = os::elapsedTime();
  6191         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6192         young_time_ms += elapsed_ms;
  6194         start_sec = os::elapsedTime();
  6195         non_young = true;
  6199     rs_lengths += cur->rem_set()->occupied_locked();
  6201     HeapRegion* next = cur->next_in_collection_set();
  6202     assert(cur->in_collection_set(), "bad CS");
  6203     cur->set_next_in_collection_set(NULL);
  6204     cur->set_in_collection_set(false);
  6206     if (cur->is_young()) {
  6207       int index = cur->young_index_in_cset();
  6208       assert(index != -1, "invariant");
  6209       assert((uint) index < policy->young_cset_region_length(), "invariant");
  6210       size_t words_survived = _surviving_young_words[index];
  6211       cur->record_surv_words_in_group(words_survived);
   6213       // At this point we have 'popped' cur from the collection set
  6214       // (linked via next_in_collection_set()) but it is still in the
  6215       // young list (linked via next_young_region()). Clear the
  6216       // _next_young_region field.
  6217       cur->set_next_young_region(NULL);
  6218     } else {
  6219       int index = cur->young_index_in_cset();
  6220       assert(index == -1, "invariant");
  6223     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  6224             (!cur->is_young() && cur->young_index_in_cset() == -1),
  6225             "invariant" );
  6227     if (!cur->evacuation_failed()) {
  6228       MemRegion used_mr = cur->used_region();
   6230       // The used region must not be empty.
   6231       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
  6232       pre_used += cur->used();
  6233       free_region(cur, &local_free_list, false /* par */, true /* locked */);
  6234     } else {
  6235       cur->uninstall_surv_rate_group();
  6236       if (cur->is_young()) {
  6237         cur->set_young_index_in_cset(-1);
  6239       cur->set_not_young();
  6240       cur->set_evacuation_failed(false);
  6241       // The region is now considered to be old.
  6242       _old_set.add(cur);
  6243       evacuation_info.increment_collectionset_used_after(cur->used());
  6245     cur = next;
  6248   evacuation_info.set_regions_freed(local_free_list.length());
  6249   policy->record_max_rs_lengths(rs_lengths);
  6250   policy->cset_regions_freed();
  6252   double end_sec = os::elapsedTime();
  6253   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6255   if (non_young) {
  6256     non_young_time_ms += elapsed_ms;
  6257   } else {
  6258     young_time_ms += elapsed_ms;
  6261   prepend_to_freelist(&local_free_list);
  6262   decrement_summary_bytes(pre_used);
  6263   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
  6264   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
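// The timing above exploits the fact that the collection set list comes
// in runs of non-young and young regions: the stopwatch is restarted at
// every run boundary and the elapsed slice is charged to the bucket for
// the run that just ended. A minimal standalone sketch of that bucketing
// (toy types, under "#if 0"):
#if 0
#include <chrono>

struct Node {
  bool young;
  Node* next;
};

void time_runs(Node* head, double& young_ms, double& non_young_ms) {
  using clock = std::chrono::steady_clock;
  bool in_young_run = (head != nullptr) && head->young;
  clock::time_point start = clock::now();
  for (Node* cur = head; cur != nullptr; cur = cur->next) {
    if (cur->young != in_young_run) {
      // Run boundary: charge the slice that just ended, restart the clock.
      double ms = std::chrono::duration<double, std::milli>(clock::now() - start).count();
      (in_young_run ? young_ms : non_young_ms) += ms;
      start = clock::now();
      in_young_run = cur->young;
    }
    // ... per-region work would go here ...
  }
  // Charge the final run.
  double ms = std::chrono::duration<double, std::milli>(clock::now() - start).count();
  (in_young_run ? young_ms : non_young_ms) += ms;
}
#endif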
  6267 // This routine is similar to the above but does not record
  6268 // any policy statistics or update free lists; we are abandoning
  6269 // the current incremental collection set in preparation of a
  6270 // full collection. After the full GC we will start to build up
  6271 // the incremental collection set again.
  6272 // This is only called when we're doing a full collection
  6273 // and is immediately followed by the tearing down of the young list.
  6275 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  6276   HeapRegion* cur = cs_head;
  6278   while (cur != NULL) {
  6279     HeapRegion* next = cur->next_in_collection_set();
  6280     assert(cur->in_collection_set(), "bad CS");
  6281     cur->set_next_in_collection_set(NULL);
  6282     cur->set_in_collection_set(false);
  6283     cur->set_young_index_in_cset(-1);
  6284     cur = next;
  6288 void G1CollectedHeap::set_free_regions_coming() {
  6289   if (G1ConcRegionFreeingVerbose) {
  6290     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6291                            "setting free regions coming");
  6294   assert(!free_regions_coming(), "pre-condition");
  6295   _free_regions_coming = true;
  6298 void G1CollectedHeap::reset_free_regions_coming() {
  6299   assert(free_regions_coming(), "pre-condition");
  6302     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6303     _free_regions_coming = false;
  6304     SecondaryFreeList_lock->notify_all();
  6307   if (G1ConcRegionFreeingVerbose) {
  6308     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6309                            "reset free regions coming");
  6313 void G1CollectedHeap::wait_while_free_regions_coming() {
  6314   // Most of the time we won't have to wait, so let's do a quick test
  6315   // first before we take the lock.
  6316   if (!free_regions_coming()) {
  6317     return;
  6320   if (G1ConcRegionFreeingVerbose) {
  6321     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6322                            "waiting for free regions");
  6326     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6327     while (free_regions_coming()) {
  6328       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  6332   if (G1ConcRegionFreeingVerbose) {
  6333     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6334                            "done waiting for free regions");
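// The wait above does a cheap unlocked test first (the common case is
// that no regions are coming) and only takes SecondaryFreeList_lock to
// block when necessary. A minimal standalone sketch of that pattern with
// std::condition_variable (toy names; the flag is atomic so the quick
// test is well-defined, and the reset happens under the mutex so no
// wakeup can be lost; guarded by "#if 0"):
#if 0
#include <atomic>
#include <condition_variable>
#include <mutex>

struct FreeRegionsGate {
  std::mutex lock;
  std::condition_variable cv;
  std::atomic<bool> coming{false};

  void wait_while_coming() {
    if (!coming.load(std::memory_order_acquire)) {
      return;                          // quick test, no lock taken
    }
    std::unique_lock<std::mutex> guard(lock);
    cv.wait(guard, [this] { return !coming.load(std::memory_order_acquire); });
  }

  void reset_coming() {
    {
      std::lock_guard<std::mutex> guard(lock);
      coming.store(false, std::memory_order_release);
    }
    cv.notify_all();
  }
};
#endif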
  6338 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  6339   assert(heap_lock_held_for_gc(),
  6340               "the heap lock should already be held by or for this thread");
  6341   _young_list->push_region(hr);
  6344 class NoYoungRegionsClosure: public HeapRegionClosure {
  6345 private:
  6346   bool _success;
  6347 public:
  6348   NoYoungRegionsClosure() : _success(true) { }
  6349   bool doHeapRegion(HeapRegion* r) {
  6350     if (r->is_young()) {
  6351       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  6352                              r->bottom(), r->end());
  6353       _success = false;
  6355     return false;
  6357   bool success() { return _success; }
  6358 };
  6360 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  6361   bool ret = _young_list->check_list_empty(check_sample);
  6363   if (check_heap) {
  6364     NoYoungRegionsClosure closure;
  6365     heap_region_iterate(&closure);
  6366     ret = ret && closure.success();
  6369   return ret;
  6372 class TearDownRegionSetsClosure : public HeapRegionClosure {
  6373 private:
  6374   HeapRegionSet *_old_set;
  6376 public:
  6377   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
  6379   bool doHeapRegion(HeapRegion* r) {
  6380     if (r->is_empty()) {
  6381       // We ignore empty regions, we'll empty the free list afterwards
  6382     } else if (r->is_young()) {
  6383       // We ignore young regions, we'll empty the young list afterwards
  6384     } else if (r->isHumongous()) {
  6385       // We ignore humongous regions, we're not tearing down the
  6386       // humongous region set
  6387     } else {
  6388       // The rest should be old
  6389       _old_set->remove(r);
  6391     return false;
  6394   ~TearDownRegionSetsClosure() {
  6395     assert(_old_set->is_empty(), "post-condition");
  6397 };
  6399 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  6400   assert_at_safepoint(true /* should_be_vm_thread */);
  6402   if (!free_list_only) {
  6403     TearDownRegionSetsClosure cl(&_old_set);
  6404     heap_region_iterate(&cl);
  6406     // Need to do this after the heap iteration to be able to
  6407     // recognize the young regions and ignore them during the iteration.
  6408     _young_list->empty_list();
  6410   _free_list.remove_all();
  6413 class RebuildRegionSetsClosure : public HeapRegionClosure {
  6414 private:
  6415   bool            _free_list_only;
  6416   HeapRegionSet*   _old_set;
  6417   FreeRegionList* _free_list;
  6418   size_t          _total_used;
  6420 public:
  6421   RebuildRegionSetsClosure(bool free_list_only,
  6422                            HeapRegionSet* old_set, FreeRegionList* free_list) :
  6423     _free_list_only(free_list_only),
  6424     _old_set(old_set), _free_list(free_list), _total_used(0) {
  6425     assert(_free_list->is_empty(), "pre-condition");
  6426     if (!free_list_only) {
  6427       assert(_old_set->is_empty(), "pre-condition");
  6431   bool doHeapRegion(HeapRegion* r) {
  6432     if (r->continuesHumongous()) {
  6433       return false;
  6436     if (r->is_empty()) {
  6437       // Add free regions to the free list
  6438       _free_list->add_as_tail(r);
  6439     } else if (!_free_list_only) {
  6440       assert(!r->is_young(), "we should not come across young regions");
  6442       if (r->isHumongous()) {
  6443         // We ignore humongous regions, we left the humongous set unchanged
  6444       } else {
  6445         // The rest should be old, add them to the old set
  6446         _old_set->add(r);
  6448       _total_used += r->used();
  6451     return false;
  6454   size_t total_used() {
  6455     return _total_used;
  6457 };
  6459 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  6460   assert_at_safepoint(true /* should_be_vm_thread */);
  6462   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
  6463   heap_region_iterate(&cl);
  6465   if (!free_list_only) {
  6466     _summary_bytes_used = cl.total_used();
  6468   assert(_summary_bytes_used == recalculate_used(),
  6469          err_msg("inconsistent _summary_bytes_used, "
  6470                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
  6471                  _summary_bytes_used, recalculate_used()));
  6474 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  6475   _refine_cte_cl->set_concurrent(concurrent);
  6478 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  6479   HeapRegion* hr = heap_region_containing(p);
  6480   if (hr == NULL) {
  6481     return false;
  6482   } else {
  6483     return hr->is_in(p);
  6487 // Methods for the mutator alloc region
  6489 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
  6490                                                       bool force) {
  6491   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6492   assert(!force || g1_policy()->can_expand_young_list(),
  6493          "if force is true we should be able to expand the young list");
  6494   bool young_list_full = g1_policy()->is_young_list_full();
  6495   if (force || !young_list_full) {
  6496     HeapRegion* new_alloc_region = new_region(word_size,
  6497                                               false /* do_expand */);
  6498     if (new_alloc_region != NULL) {
  6499       set_region_short_lived_locked(new_alloc_region);
  6500       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
  6501       return new_alloc_region;
  6504   return NULL;
  6507 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  6508                                                   size_t allocated_bytes) {
  6509   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6510   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  6512   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  6513   _summary_bytes_used += allocated_bytes;
  6514   _hr_printer.retire(alloc_region);
  6515   // We update the eden sizes here, when the region is retired,
  6516   // instead of when it's allocated, since this is the point that its
   6517   // used space has been recorded in _summary_bytes_used.
  6518   g1mm()->update_eden_size();
  6521 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  6522                                                     bool force) {
  6523   return _g1h->new_mutator_alloc_region(word_size, force);
  6526 void G1CollectedHeap::set_par_threads() {
  6527   // Don't change the number of workers.  Use the value previously set
  6528   // in the workgroup.
  6529   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
  6530   uint n_workers = workers()->active_workers();
  6531   assert(UseDynamicNumberOfGCThreads ||
  6532            n_workers == workers()->total_workers(),
  6533       "Otherwise should be using the total number of workers");
  6534   if (n_workers == 0) {
  6535     assert(false, "Should have been set in prior evacuation pause.");
  6536     n_workers = ParallelGCThreads;
  6537     workers()->set_active_workers(n_workers);
  6539   set_par_threads(n_workers);
  6542 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  6543                                        size_t allocated_bytes) {
  6544   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  6547 // Methods for the GC alloc regions
  6549 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
  6550                                                  uint count,
  6551                                                  GCAllocPurpose ap) {
  6552   assert(FreeList_lock->owned_by_self(), "pre-condition");
  6554   if (count < g1_policy()->max_regions(ap)) {
  6555     HeapRegion* new_alloc_region = new_region(word_size,
  6556                                               true /* do_expand */);
  6557     if (new_alloc_region != NULL) {
  6558       // We really only need to do this for old regions given that we
  6559       // should never scan survivors. But it doesn't hurt to do it
  6560       // for survivors too.
  6561       new_alloc_region->set_saved_mark();
  6562       if (ap == GCAllocForSurvived) {
  6563         new_alloc_region->set_survivor();
  6564         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
  6565       } else {
  6566         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
  6568       bool during_im = g1_policy()->during_initial_mark_pause();
  6569       new_alloc_region->note_start_of_copying(during_im);
  6570       return new_alloc_region;
  6571     } else {
  6572       g1_policy()->note_alloc_region_limit_reached(ap);
  6575   return NULL;
  6578 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  6579                                              size_t allocated_bytes,
  6580                                              GCAllocPurpose ap) {
  6581   bool during_im = g1_policy()->during_initial_mark_pause();
  6582   alloc_region->note_end_of_copying(during_im);
  6583   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  6584   if (ap == GCAllocForSurvived) {
  6585     young_list()->add_survivor_region(alloc_region);
  6586   } else {
  6587     _old_set.add(alloc_region);
  6589   _hr_printer.retire(alloc_region);
  6592 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
  6593                                                        bool force) {
  6594   assert(!force, "not supported for GC alloc regions");
  6595   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
  6598 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6599                                           size_t allocated_bytes) {
  6600   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6601                                GCAllocForSurvived);
  6604 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
  6605                                                   bool force) {
  6606   assert(!force, "not supported for GC alloc regions");
  6607   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
  6610 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6611                                      size_t allocated_bytes) {
  6612   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6613                                GCAllocForTenured);
  6615 // Heap region set verification
  6617 class VerifyRegionListsClosure : public HeapRegionClosure {
  6618 private:
  6619   HeapRegionSet*   _old_set;
  6620   HeapRegionSet*   _humongous_set;
  6621   FreeRegionList*  _free_list;
  6623 public:
  6624   HeapRegionSetCount _old_count;
  6625   HeapRegionSetCount _humongous_count;
  6626   HeapRegionSetCount _free_count;
  6628   VerifyRegionListsClosure(HeapRegionSet* old_set,
  6629                            HeapRegionSet* humongous_set,
  6630                            FreeRegionList* free_list) :
  6631     _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
   6632     _old_count(), _humongous_count(), _free_count() { }
  6634   bool doHeapRegion(HeapRegion* hr) {
  6635     if (hr->continuesHumongous()) {
  6636       return false;
  6639     if (hr->is_young()) {
  6640       // TODO
  6641     } else if (hr->startsHumongous()) {
  6642       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
  6643       _humongous_count.increment(1u, hr->capacity());
  6644     } else if (hr->is_empty()) {
  6645       assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
  6646       _free_count.increment(1u, hr->capacity());
  6647     } else {
  6648       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
  6649       _old_count.increment(1u, hr->capacity());
  6651     return false;
  6654   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
  6655     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
  6656     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  6657         old_set->total_capacity_bytes(), _old_count.capacity()));
  6659     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
  6660     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  6661         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
  6663     guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
  6664     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  6665         free_list->total_capacity_bytes(), _free_count.capacity()));
  6667 };
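// The closure above verifies the region sets by recounting from scratch:
// one pass over the heap rebuilds per-set length/capacity totals, which
// are then compared against the counts the sets maintain incrementally.
// A minimal standalone sketch of that recount-and-compare idea (toy
// types, under "#if 0"):
#if 0
#include <cassert>
#include <cstddef>
#include <vector>

struct Region {
  enum Kind { Old, Humongous, Free } kind;
  std::size_t capacity;
};

struct Count {
  std::size_t length = 0;
  std::size_t capacity = 0;
  void add(const Region& r) { length++; capacity += r.capacity; }
};

void verify_sets(const std::vector<Region>& heap, const Count& old_set,
                 const Count& humongous_set, const Count& free_set) {
  Count old_c, hum_c, free_c;
  for (const Region& r : heap) {
    switch (r.kind) {
      case Region::Old:       old_c.add(r);  break;
      case Region::Humongous: hum_c.add(r);  break;
      case Region::Free:      free_c.add(r); break;
    }
  }
  assert(old_c.length == old_set.length && old_c.capacity == old_set.capacity);
  assert(hum_c.length == humongous_set.length && hum_c.capacity == humongous_set.capacity);
  assert(free_c.length == free_set.length && free_c.capacity == free_set.capacity);
}
#endif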
  6669 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
  6670                                              HeapWord* bottom) {
  6671   HeapWord* end = bottom + HeapRegion::GrainWords;
  6672   MemRegion mr(bottom, end);
  6673   assert(_g1_reserved.contains(mr), "invariant");
  6674   // This might return NULL if the allocation fails
  6675   return new HeapRegion(hrs_index, _bot_shared, mr);
  6678 void G1CollectedHeap::verify_region_sets() {
  6679   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6681   // First, check the explicit lists.
  6682   _free_list.verify_list();
  6684     // Given that a concurrent operation might be adding regions to
  6685     // the secondary free list we have to take the lock before
  6686     // verifying it.
  6687     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6688     _secondary_free_list.verify_list();
  6691   // If a concurrent region freeing operation is in progress it will
   6692   // be difficult to correctly attribute any free regions we come
  6693   // across to the correct free list given that they might belong to
  6694   // one of several (free_list, secondary_free_list, any local lists,
  6695   // etc.). So, if that's the case we will skip the rest of the
  6696   // verification operation. Alternatively, waiting for the concurrent
  6697   // operation to complete will have a non-trivial effect on the GC's
  6698   // operation (no concurrent operation will last longer than the
  6699   // interval between two calls to verification) and it might hide
  6700   // any issues that we would like to catch during testing.
  6701   if (free_regions_coming()) {
  6702     return;
  6705   // Make sure we append the secondary_free_list on the free_list so
  6706   // that all free regions we will come across can be safely
  6707   // attributed to the free_list.
  6708   append_secondary_free_list_if_not_empty_with_lock();
  6710   // Finally, make sure that the region accounting in the lists is
  6711   // consistent with what we see in the heap.
  6713   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
  6714   heap_region_iterate(&cl);
  6715   cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
  6718 // Optimized nmethod scanning
  6720 class RegisterNMethodOopClosure: public OopClosure {
  6721   G1CollectedHeap* _g1h;
  6722   nmethod* _nm;
  6724   template <class T> void do_oop_work(T* p) {
  6725     T heap_oop = oopDesc::load_heap_oop(p);
  6726     if (!oopDesc::is_null(heap_oop)) {
  6727       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  6728       HeapRegion* hr = _g1h->heap_region_containing(obj);
  6729       assert(!hr->continuesHumongous(),
  6730              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
  6731                      " starting at "HR_FORMAT,
  6732                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
  6734       // HeapRegion::add_strong_code_root() avoids adding duplicate
   6735       // entries but having duplicates is OK since we "mark" nmethods
  6736       // as visited when we scan the strong code root lists during the GC.
  6737       hr->add_strong_code_root(_nm);
  6738       assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
  6739              err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
  6740                      _nm, HR_FORMAT_PARAMS(hr)));
public:
  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class UnregisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      assert(!hr->continuesHumongous(),
             err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                     " starting at "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

      hr->remove_strong_code_root(_nm);
      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
             err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
                     _nm, HR_FORMAT_PARAMS(hr)));
    }
  }

public:
  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

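// CollectedHeap hooks, called when an nmethod is registered with or
// unregistered from the heap. They walk the nmethod's oops with the
// closures above to update the per-region strong code root lists.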
void G1CollectedHeap::register_nmethod(nmethod* nm) {
  CollectedHeap::register_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  RegisterNMethodOopClosure reg_cl(this, nm);
  nm->oops_do(&reg_cl);
}

void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  CollectedHeap::unregister_nmethod(nm);

  guarantee(nm != NULL, "sanity");
  UnregisterNMethodOopClosure reg_cl(this, nm);
  nm->oops_do(&reg_cl, true /* allow_zombie */);
}

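// Regions in the collection set are freed at the end of a pause, so any
// nmethods on their strong code root lists have to be re-attributed to the
// regions that now contain the evacuated objects they reference;
// HeapRegion::migrate_strong_code_roots() is expected to perform that
// re-attribution for a single region.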
class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion *hr) {
    assert(!hr->isHumongous(),
           err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
                   HR_FORMAT_PARAMS(hr)));
    hr->migrate_strong_code_roots();
    return false;
  }
};

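// Runs the closure above over every region in the collection set and
// reports the elapsed wall time to the phase times bookkeeping so it can be
// reported along with the other pause phases.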
void G1CollectedHeap::migrate_strong_code_roots() {
  MigrateCodeRootsHeapRegionClosure cl;
  double migrate_start = os::elapsedTime();
  collection_set_iterate(&cl);
  double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
  g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
}

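// Returns surplus G1CodeRootSet chunk memory to the VM while keeping a
// percentage of the chunks cached for reuse, as controlled by the
// G1CodeRootsChunkCacheKeepPercent flag. A sketch of the retention
// arithmetic (an assumption about purge_chunks(), not a quote of it):
//   size_t max_cached = (allocated_chunks * G1CodeRootsChunkCacheKeepPercent) / 100;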
void G1CollectedHeap::purge_code_root_memory() {
  double purge_start = os::elapsedTime();
  G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
  double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
  g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}

// Mark all the code roots that point into regions *not* in the
// collection set.
//
// Note we do not want to use a "marking" CodeBlobToOopClosure while
// walking the code root lists of regions not in the collection
// set. Suppose we have an nmethod (M) that points to objects in two
// separate regions - one in the collection set (R1) and one not (R2).
// Using a "marking" CodeBlobToOopClosure here would result in "marking"
// nmethod M when walking the code roots for R1. When we come to scan
// the code roots for R2, we would see that M is already marked and it
// would be skipped and the objects in R2 that are referenced from M
// would not be evacuated.

class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {

  class MarkStrongCodeRootOopClosure: public OopClosure {
    ConcurrentMark* _cm;
    HeapRegion* _hr;
    uint _worker_id;

    template <class T> void do_oop_work(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        // Only mark objects in the region (which is assumed
        // to be not in the collection set).
        if (_hr->is_in(obj)) {
          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
        }
      }
    }

  public:
    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
      _cm(cm), _hr(hr), _worker_id(worker_id) {
      assert(!_hr->in_collection_set(), "sanity");
    }

    void do_oop(narrowOop* p) { do_oop_work(p); }
    void do_oop(oop* p)       { do_oop_work(p); }
  };

  MarkStrongCodeRootOopClosure _oop_cl;

public:
  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
    _oop_cl(cm, hr, worker_id) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      nm->oops_do(&_oop_cl);
    }
  }
};

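// Heap region closure that applies the code blob closure above to the
// strong code roots of every region outside the collection set, skipping
// continuations of humongous regions (which never have code roots attached).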
class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  uint _worker_id;

public:
  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
    _g1h(g1h), _worker_id(worker_id) {}

  bool doHeapRegion(HeapRegion *hr) {
    HeapRegionRemSet* hrrs = hr->rem_set();
    if (hr->continuesHumongous()) {
      // Code roots should never be attached to a continuation of a humongous region
      assert(hrrs->strong_code_roots_list_length() == 0,
             err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
                     " starting at "HR_FORMAT", but has "SIZE_FORMAT,
                     HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
                     hrrs->strong_code_roots_list_length()));
      return false;
    }

    if (hr->in_collection_set()) {
      // Don't mark code roots into regions in the collection set here.
      // They will be marked when we scan them.
      return false;
    }

    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
    hr->strong_code_roots_do(&cb_cl);
    return false;
  }
};

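// Per-worker entry point for the marking above. In the parallel case each
// worker claims chunks of the region sequence using
// HeapRegion::ParMarkRootClaimValue, so every region is processed by
// exactly one worker.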
void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
  MarkStrongCodeRootsHRClosure cl(this, worker_id);
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    heap_region_par_iterate_chunked(&cl,
                                    worker_id,
                                    workers()->active_workers(),
                                    HeapRegion::ParMarkRootClaimValue);
  } else {
    heap_region_iterate(&cl);
  }
}

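// Code blob closure used to rebuild the strong code root lists from
// scratch: every nmethod in the code cache that contains scavengable oops
// is re-registered with the heap.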
class RebuildStrongCodeRootClosure: public CodeBlobClosure {
  G1CollectedHeap* _g1h;

public:
  RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
    _g1h(g1h) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
    if (nm == NULL) {
      return;
    }

    if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
      _g1h->register_nmethod(nm);
    }
  }
};

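// Rebuilds all per-region strong code root lists by iterating over the
// entire code cache with the closure above; used when the lists cannot be
// updated incrementally, e.g. after a full GC has moved objects.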
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
