src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author:      johnc
date:        Sat, 06 Oct 2012 01:17:44 -0700
changeset:   4173:8a5ea0a9ccc4
parent:      4130:2e6857353b2c
child:       4174:04155d9c8c76
permissions: -rw-r--r--

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters etc to unsigned and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>
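
The diff itself is not shown on this page (the listing below is the full file at this revision), so here is a minimal, hypothetical C++ sketch of the kind of change the summary describes; the function and variable names are illustrative only and are not taken from this changeset.

#include <cstdio>

typedef unsigned int uint;  // HotSpot-style alias so the sketch is self-contained

// Hypothetical before/after: a per-worker routine whose identifier used to be
// a signed "int task_num" and becomes an unsigned "uint worker_id", renamed
// for consistency with the other collectors, as the summary describes.
static void example_do_marking_step(uint worker_id) {
  std::printf("concurrent mark worker %u running\n", worker_id);
}

int main() {
  for (uint worker_id = 0; worker_id < 4u; ++worker_id) {
    example_do_marking_step(worker_id);
  }
  return 0;
}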

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/icBuffer.hpp"
    27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
    28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
    30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    35 #include "gc_implementation/g1/g1EvacFailure.hpp"
    36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
    37 #include "gc_implementation/g1/g1Log.hpp"
    38 #include "gc_implementation/g1/g1MarkSweep.hpp"
    39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    41 #include "gc_implementation/g1/heapRegion.inline.hpp"
    42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    44 #include "gc_implementation/g1/vm_operations_g1.hpp"
    45 #include "gc_implementation/shared/isGCActiveMark.hpp"
    46 #include "memory/gcLocker.inline.hpp"
    47 #include "memory/genOopClosures.inline.hpp"
    48 #include "memory/generationSpec.hpp"
    49 #include "memory/referenceProcessor.hpp"
    50 #include "oops/oop.inline.hpp"
    51 #include "oops/oop.pcgc.inline.hpp"
    52 #include "runtime/aprofiler.hpp"
    53 #include "runtime/vmThread.hpp"
    55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
    57 // turn it on so that the contents of the young list (scan-only /
    58 // to-be-collected) are printed at "strategic" points before / during
    59 // / after the collection --- this is useful for debugging
    60 #define YOUNG_LIST_VERBOSE 0
    61 // CURRENT STATUS
    62 // This file is under construction.  Search for "FIXME".
    64 // INVARIANTS/NOTES
    65 //
    66 // All allocation activity covered by the G1CollectedHeap interface is
    67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
    68 // and allocate_new_tlab, which are the "entry" points to the
    69 // allocation code from the rest of the JVM.  (Note that this does not
    70 // apply to TLAB allocation, which is not part of this interface: it
    71 // is done by clients of this interface.)
    73 // Notes on implementation of parallelism in different tasks.
    74 //
    75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
    76 // The number of GC workers is passed to heap_region_par_iterate_chunked().
    77 // It does use run_task() which sets _n_workers in the task.
    78 // G1ParTask executes g1_process_strong_roots() ->
     79 // SharedHeap::process_strong_roots() which eventually calls
    80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
    81 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
    82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
    83 //
    85 // Local to this file.
    87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    88   SuspendibleThreadSet* _sts;
    89   G1RemSet* _g1rs;
    90   ConcurrentG1Refine* _cg1r;
    91   bool _concurrent;
    92 public:
    93   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    94                               G1RemSet* g1rs,
    95                               ConcurrentG1Refine* cg1r) :
    96     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    97   {}
    98   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    99     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
   100     // This path is executed by the concurrent refine or mutator threads,
   101     // concurrently, and so we do not care if card_ptr contains references
   102     // that point into the collection set.
   103     assert(!oops_into_cset, "should be");
   105     if (_concurrent && _sts->should_yield()) {
   106       // Caller will actually yield.
   107       return false;
   108     }
   109     // Otherwise, we finished successfully; return true.
   110     return true;
   111   }
   112   void set_concurrent(bool b) { _concurrent = b; }
   113 };
   116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
   117   int _calls;
   118   G1CollectedHeap* _g1h;
   119   CardTableModRefBS* _ctbs;
   120   int _histo[256];
   121 public:
   122   ClearLoggedCardTableEntryClosure() :
   123     _calls(0)
   124   {
   125     _g1h = G1CollectedHeap::heap();
   126     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   127     for (int i = 0; i < 256; i++) _histo[i] = 0;
   128   }
   129   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   130     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   131       _calls++;
   132       unsigned char* ujb = (unsigned char*)card_ptr;
   133       int ind = (int)(*ujb);
   134       _histo[ind]++;
   135       *card_ptr = -1;
   136     }
   137     return true;
   138   }
   139   int calls() { return _calls; }
   140   void print_histo() {
   141     gclog_or_tty->print_cr("Card table value histogram:");
   142     for (int i = 0; i < 256; i++) {
   143       if (_histo[i] != 0) {
   144         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
   145       }
   146     }
   147   }
   148 };
   150 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   151   int _calls;
   152   G1CollectedHeap* _g1h;
   153   CardTableModRefBS* _ctbs;
   154 public:
   155   RedirtyLoggedCardTableEntryClosure() :
   156     _calls(0)
   157   {
   158     _g1h = G1CollectedHeap::heap();
   159     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   160   }
   161   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   162     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   163       _calls++;
   164       *card_ptr = 0;
   165     }
   166     return true;
   167   }
   168   int calls() { return _calls; }
   169 };
   171 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   172 public:
   173   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   174     *card_ptr = CardTableModRefBS::dirty_card_val();
   175     return true;
   176   }
   177 };
   179 YoungList::YoungList(G1CollectedHeap* g1h) :
   180     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
   181     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
   182   guarantee(check_list_empty(false), "just making sure...");
   183 }
   185 void YoungList::push_region(HeapRegion *hr) {
   186   assert(!hr->is_young(), "should not already be young");
   187   assert(hr->get_next_young_region() == NULL, "cause it should!");
   189   hr->set_next_young_region(_head);
   190   _head = hr;
   192   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
   193   ++_length;
   194 }
   196 void YoungList::add_survivor_region(HeapRegion* hr) {
   197   assert(hr->is_survivor(), "should be flagged as survivor region");
   198   assert(hr->get_next_young_region() == NULL, "cause it should!");
   200   hr->set_next_young_region(_survivor_head);
   201   if (_survivor_head == NULL) {
   202     _survivor_tail = hr;
   203   }
   204   _survivor_head = hr;
   205   ++_survivor_length;
   206 }
   208 void YoungList::empty_list(HeapRegion* list) {
   209   while (list != NULL) {
   210     HeapRegion* next = list->get_next_young_region();
   211     list->set_next_young_region(NULL);
   212     list->uninstall_surv_rate_group();
   213     list->set_not_young();
   214     list = next;
   215   }
   216 }
   218 void YoungList::empty_list() {
   219   assert(check_list_well_formed(), "young list should be well formed");
   221   empty_list(_head);
   222   _head = NULL;
   223   _length = 0;
   225   empty_list(_survivor_head);
   226   _survivor_head = NULL;
   227   _survivor_tail = NULL;
   228   _survivor_length = 0;
   230   _last_sampled_rs_lengths = 0;
   232   assert(check_list_empty(false), "just making sure...");
   233 }
   235 bool YoungList::check_list_well_formed() {
   236   bool ret = true;
   238   uint length = 0;
   239   HeapRegion* curr = _head;
   240   HeapRegion* last = NULL;
   241   while (curr != NULL) {
   242     if (!curr->is_young()) {
   243       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   244                              "incorrectly tagged (y: %d, surv: %d)",
   245                              curr->bottom(), curr->end(),
   246                              curr->is_young(), curr->is_survivor());
   247       ret = false;
   248     }
   249     ++length;
   250     last = curr;
   251     curr = curr->get_next_young_region();
   252   }
   253   ret = ret && (length == _length);
   255   if (!ret) {
   256     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   257     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
   258                            length, _length);
   259   }
   261   return ret;
   262 }
   264 bool YoungList::check_list_empty(bool check_sample) {
   265   bool ret = true;
   267   if (_length != 0) {
   268     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
   269                   _length);
   270     ret = false;
   271   }
   272   if (check_sample && _last_sampled_rs_lengths != 0) {
   273     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   274     ret = false;
   275   }
   276   if (_head != NULL) {
   277     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   278     ret = false;
   279   }
   280   if (!ret) {
   281     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   282   }
   284   return ret;
   285 }
   287 void
   288 YoungList::rs_length_sampling_init() {
   289   _sampled_rs_lengths = 0;
   290   _curr               = _head;
   291 }
   293 bool
   294 YoungList::rs_length_sampling_more() {
   295   return _curr != NULL;
   296 }
   298 void
   299 YoungList::rs_length_sampling_next() {
   300   assert( _curr != NULL, "invariant" );
   301   size_t rs_length = _curr->rem_set()->occupied();
   303   _sampled_rs_lengths += rs_length;
   305   // The current region may not yet have been added to the
   306   // incremental collection set (it gets added when it is
   307   // retired as the current allocation region).
   308   if (_curr->in_collection_set()) {
   309     // Update the collection set policy information for this region
   310     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   311   }
   313   _curr = _curr->get_next_young_region();
   314   if (_curr == NULL) {
   315     _last_sampled_rs_lengths = _sampled_rs_lengths;
   316     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   317   }
   318 }
   320 void
   321 YoungList::reset_auxilary_lists() {
   322   guarantee( is_empty(), "young list should be empty" );
   323   assert(check_list_well_formed(), "young list should be well formed");
   325   // Add survivor regions to SurvRateGroup.
   326   _g1h->g1_policy()->note_start_adding_survivor_regions();
   327   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   329   int young_index_in_cset = 0;
   330   for (HeapRegion* curr = _survivor_head;
   331        curr != NULL;
   332        curr = curr->get_next_young_region()) {
   333     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
   335     // The region is a non-empty survivor so let's add it to
   336     // the incremental collection set for the next evacuation
   337     // pause.
   338     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   339     young_index_in_cset += 1;
   340   }
   341   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
   342   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   344   _head   = _survivor_head;
   345   _length = _survivor_length;
   346   if (_survivor_head != NULL) {
   347     assert(_survivor_tail != NULL, "cause it shouldn't be");
   348     assert(_survivor_length > 0, "invariant");
   349     _survivor_tail->set_next_young_region(NULL);
   350   }
   352   // Don't clear the survivor list handles until the start of
   353   // the next evacuation pause - we need it in order to re-tag
   354   // the survivor regions from this evacuation pause as 'young'
   355   // at the start of the next.
   357   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   359   assert(check_list_well_formed(), "young list should be well formed");
   360 }
   362 void YoungList::print() {
   363   HeapRegion* lists[] = {_head,   _survivor_head};
   364   const char* names[] = {"YOUNG", "SURVIVOR"};
   366   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   367     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   368     HeapRegion *curr = lists[list];
   369     if (curr == NULL)
   370       gclog_or_tty->print_cr("  empty");
   371     while (curr != NULL) {
   372       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
   373                              HR_FORMAT_PARAMS(curr),
   374                              curr->prev_top_at_mark_start(),
   375                              curr->next_top_at_mark_start(),
   376                              curr->age_in_surv_rate_group_cond());
   377       curr = curr->get_next_young_region();
   378     }
   379   }
   381   gclog_or_tty->print_cr("");
   382 }
   384 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   385 {
   386   // Claim the right to put the region on the dirty cards region list
   387   // by installing a self pointer.
   388   HeapRegion* next = hr->get_next_dirty_cards_region();
   389   if (next == NULL) {
   390     HeapRegion* res = (HeapRegion*)
   391       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   392                           NULL);
   393     if (res == NULL) {
   394       HeapRegion* head;
   395       do {
   396         // Put the region to the dirty cards region list.
   397         head = _dirty_cards_region_list;
   398         next = (HeapRegion*)
   399           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   400         if (next == head) {
   401           assert(hr->get_next_dirty_cards_region() == hr,
   402                  "hr->get_next_dirty_cards_region() != hr");
   403           if (next == NULL) {
   404             // The last region in the list points to itself.
   405             hr->set_next_dirty_cards_region(hr);
   406           } else {
   407             hr->set_next_dirty_cards_region(next);
   408           }
   409         }
   410       } while (next != head);
   411     }
   412   }
   413 }
   415 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   416 {
   417   HeapRegion* head;
   418   HeapRegion* hr;
   419   do {
   420     head = _dirty_cards_region_list;
   421     if (head == NULL) {
   422       return NULL;
   423     }
   424     HeapRegion* new_head = head->get_next_dirty_cards_region();
   425     if (head == new_head) {
   426       // The last region.
   427       new_head = NULL;
   428     }
   429     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   430                                           head);
   431   } while (hr != head);
   432   assert(hr != NULL, "invariant");
   433   hr->set_next_dirty_cards_region(NULL);
   434   return hr;
   435 }
   437 void G1CollectedHeap::stop_conc_gc_threads() {
   438   _cg1r->stop();
   439   _cmThread->stop();
   440 }
   442 #ifdef ASSERT
   443 // A region is added to the collection set as it is retired
   444 // so an address p can point to a region which will be in the
   445 // collection set but has not yet been retired.  This method
   446 // therefore is only accurate during a GC pause after all
   447 // regions have been retired.  It is used for debugging
   448 // to check if an nmethod has references to objects that can
    449 // be moved during a partial collection.  Though it can be
   450 // inaccurate, it is sufficient for G1 because the conservative
   451 // implementation of is_scavengable() for G1 will indicate that
   452 // all nmethods must be scanned during a partial collection.
   453 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
   454   HeapRegion* hr = heap_region_containing(p);
   455   return hr != NULL && hr->in_collection_set();
   456 }
   457 #endif
   459 // Returns true if the reference points to an object that
    460 // can move in an incremental collection.
   461 bool G1CollectedHeap::is_scavengable(const void* p) {
   462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   463   G1CollectorPolicy* g1p = g1h->g1_policy();
   464   HeapRegion* hr = heap_region_containing(p);
   465   if (hr == NULL) {
   466      // null
   467      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
   468      return false;
   469   } else {
   470     return !hr->isHumongous();
   471   }
   472 }
   474 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   475   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   476   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   478   // Count the dirty cards at the start.
   479   CountNonCleanMemRegionClosure count1(this);
   480   ct_bs->mod_card_iterate(&count1);
   481   int orig_count = count1.n();
   483   // First clear the logged cards.
   484   ClearLoggedCardTableEntryClosure clear;
   485   dcqs.set_closure(&clear);
   486   dcqs.apply_closure_to_all_completed_buffers();
   487   dcqs.iterate_closure_all_threads(false);
   488   clear.print_histo();
   490   // Now ensure that there's no dirty cards.
   491   CountNonCleanMemRegionClosure count2(this);
   492   ct_bs->mod_card_iterate(&count2);
   493   if (count2.n() != 0) {
   494     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   495                            count2.n(), orig_count);
   496   }
   497   guarantee(count2.n() == 0, "Card table should be clean.");
   499   RedirtyLoggedCardTableEntryClosure redirty;
   500   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   501   dcqs.apply_closure_to_all_completed_buffers();
   502   dcqs.iterate_closure_all_threads(false);
   503   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   504                          clear.calls(), orig_count);
   505   guarantee(redirty.calls() == clear.calls(),
   506             "Or else mechanism is broken.");
   508   CountNonCleanMemRegionClosure count3(this);
   509   ct_bs->mod_card_iterate(&count3);
   510   if (count3.n() != orig_count) {
   511     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   512                            orig_count, count3.n());
   513     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   514   }
   516   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   517 }
   519 // Private class members.
   521 G1CollectedHeap* G1CollectedHeap::_g1h;
   523 // Private methods.
   525 HeapRegion*
   526 G1CollectedHeap::new_region_try_secondary_free_list() {
   527   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   528   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
   529     if (!_secondary_free_list.is_empty()) {
   530       if (G1ConcRegionFreeingVerbose) {
   531         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   532                                "secondary_free_list has %u entries",
   533                                _secondary_free_list.length());
   534       }
   535       // It looks as if there are free regions available on the
   536       // secondary_free_list. Let's move them to the free_list and try
   537       // again to allocate from it.
   538       append_secondary_free_list();
   540       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
   541              "empty we should have moved at least one entry to the free_list");
   542       HeapRegion* res = _free_list.remove_head();
   543       if (G1ConcRegionFreeingVerbose) {
   544         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   545                                "allocated "HR_FORMAT" from secondary_free_list",
   546                                HR_FORMAT_PARAMS(res));
   547       }
   548       return res;
   549     }
    551     // Wait here until we get notified either when (a) there are no
   552     // more free regions coming or (b) some regions have been moved on
   553     // the secondary_free_list.
   554     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   555   }
   557   if (G1ConcRegionFreeingVerbose) {
   558     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   559                            "could not allocate from secondary_free_list");
   560   }
   561   return NULL;
   562 }
   564 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   565   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
   566          "the only time we use this to allocate a humongous region is "
   567          "when we are allocating a single humongous region");
   569   HeapRegion* res;
   570   if (G1StressConcRegionFreeing) {
   571     if (!_secondary_free_list.is_empty()) {
   572       if (G1ConcRegionFreeingVerbose) {
   573         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   574                                "forced to look at the secondary_free_list");
   575       }
   576       res = new_region_try_secondary_free_list();
   577       if (res != NULL) {
   578         return res;
   579       }
   580     }
   581   }
   582   res = _free_list.remove_head_or_null();
   583   if (res == NULL) {
   584     if (G1ConcRegionFreeingVerbose) {
   585       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   586                              "res == NULL, trying the secondary_free_list");
   587     }
   588     res = new_region_try_secondary_free_list();
   589   }
   590   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
   591     // Currently, only attempts to allocate GC alloc regions set
   592     // do_expand to true. So, we should only reach here during a
   593     // safepoint. If this assumption changes we might have to
   594     // reconsider the use of _expand_heap_after_alloc_failure.
   595     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   597     ergo_verbose1(ErgoHeapSizing,
   598                   "attempt heap expansion",
   599                   ergo_format_reason("region allocation request failed")
   600                   ergo_format_byte("allocation request"),
   601                   word_size * HeapWordSize);
   602     if (expand(word_size * HeapWordSize)) {
   603       // Given that expand() succeeded in expanding the heap, and we
   604       // always expand the heap by an amount aligned to the heap
   605       // region size, the free list should in theory not be empty. So
   606       // it would probably be OK to use remove_head(). But the extra
   607       // check for NULL is unlikely to be a performance issue here (we
   608       // just expanded the heap!) so let's just be conservative and
   609       // use remove_head_or_null().
   610       res = _free_list.remove_head_or_null();
   611     } else {
   612       _expand_heap_after_alloc_failure = false;
   613     }
   614   }
   615   return res;
   616 }
   618 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
   619                                                         size_t word_size) {
   620   assert(isHumongous(word_size), "word_size should be humongous");
   621   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   623   uint first = G1_NULL_HRS_INDEX;
   624   if (num_regions == 1) {
   625     // Only one region to allocate, no need to go through the slower
    626     // path. The caller will attempt the expansion if this fails, so
   627     // let's not try to expand here too.
   628     HeapRegion* hr = new_region(word_size, false /* do_expand */);
   629     if (hr != NULL) {
   630       first = hr->hrs_index();
   631     } else {
   632       first = G1_NULL_HRS_INDEX;
   633     }
   634   } else {
   635     // We can't allocate humongous regions while cleanupComplete() is
   636     // running, since some of the regions we find to be empty might not
   637     // yet be added to the free list and it is not straightforward to
   638     // know which list they are on so that we can remove them. Note
   639     // that we only need to do this if we need to allocate more than
   640     // one region to satisfy the current humongous allocation
   641     // request. If we are only allocating one region we use the common
   642     // region allocation code (see above).
   643     wait_while_free_regions_coming();
   644     append_secondary_free_list_if_not_empty_with_lock();
   646     if (free_regions() >= num_regions) {
   647       first = _hrs.find_contiguous(num_regions);
   648       if (first != G1_NULL_HRS_INDEX) {
   649         for (uint i = first; i < first + num_regions; ++i) {
   650           HeapRegion* hr = region_at(i);
   651           assert(hr->is_empty(), "sanity");
   652           assert(is_on_master_free_list(hr), "sanity");
   653           hr->set_pending_removal(true);
   654         }
   655         _free_list.remove_all_pending(num_regions);
   656       }
   657     }
   658   }
   659   return first;
   660 }
   662 HeapWord*
   663 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   664                                                            uint num_regions,
   665                                                            size_t word_size) {
   666   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   667   assert(isHumongous(word_size), "word_size should be humongous");
   668   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   670   // Index of last region in the series + 1.
   671   uint last = first + num_regions;
   673   // We need to initialize the region(s) we just discovered. This is
   674   // a bit tricky given that it can happen concurrently with
   675   // refinement threads refining cards on these regions and
   676   // potentially wanting to refine the BOT as they are scanning
   677   // those cards (this can happen shortly after a cleanup; see CR
   678   // 6991377). So we have to set up the region(s) carefully and in
   679   // a specific order.
   681   // The word size sum of all the regions we will allocate.
   682   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
   683   assert(word_size <= word_size_sum, "sanity");
   685   // This will be the "starts humongous" region.
   686   HeapRegion* first_hr = region_at(first);
   687   // The header of the new object will be placed at the bottom of
   688   // the first region.
   689   HeapWord* new_obj = first_hr->bottom();
   690   // This will be the new end of the first region in the series that
    691   // should also match the end of the last region in the series.
   692   HeapWord* new_end = new_obj + word_size_sum;
   693   // This will be the new top of the first region that will reflect
   694   // this allocation.
   695   HeapWord* new_top = new_obj + word_size;
   697   // First, we need to zero the header of the space that we will be
   698   // allocating. When we update top further down, some refinement
   699   // threads might try to scan the region. By zeroing the header we
   700   // ensure that any thread that will try to scan the region will
   701   // come across the zero klass word and bail out.
   702   //
   703   // NOTE: It would not have been correct to have used
   704   // CollectedHeap::fill_with_object() and make the space look like
   705   // an int array. The thread that is doing the allocation will
   706   // later update the object header to a potentially different array
   707   // type and, for a very short period of time, the klass and length
   708   // fields will be inconsistent. This could cause a refinement
   709   // thread to calculate the object size incorrectly.
   710   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   712   // We will set up the first region as "starts humongous". This
   713   // will also update the BOT covering all the regions to reflect
   714   // that there is a single object that starts at the bottom of the
   715   // first region.
   716   first_hr->set_startsHumongous(new_top, new_end);
   718   // Then, if there are any, we will set up the "continues
   719   // humongous" regions.
   720   HeapRegion* hr = NULL;
   721   for (uint i = first + 1; i < last; ++i) {
   722     hr = region_at(i);
   723     hr->set_continuesHumongous(first_hr);
   724   }
   725   // If we have "continues humongous" regions (hr != NULL), then the
   726   // end of the last one should match new_end.
   727   assert(hr == NULL || hr->end() == new_end, "sanity");
   729   // Up to this point no concurrent thread would have been able to
   730   // do any scanning on any region in this series. All the top
   731   // fields still point to bottom, so the intersection between
   732   // [bottom,top] and [card_start,card_end] will be empty. Before we
   733   // update the top fields, we'll do a storestore to make sure that
   734   // no thread sees the update to top before the zeroing of the
   735   // object header and the BOT initialization.
   736   OrderAccess::storestore();
   738   // Now that the BOT and the object header have been initialized,
   739   // we can update top of the "starts humongous" region.
   740   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   741          "new_top should be in this region");
   742   first_hr->set_top(new_top);
   743   if (_hr_printer.is_active()) {
   744     HeapWord* bottom = first_hr->bottom();
   745     HeapWord* end = first_hr->orig_end();
   746     if ((first + 1) == last) {
   747       // the series has a single humongous region
   748       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
   749     } else {
    750       // the series has more than one humongous region
   751       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
   752     }
   753   }
   755   // Now, we will update the top fields of the "continues humongous"
   756   // regions. The reason we need to do this is that, otherwise,
   757   // these regions would look empty and this will confuse parts of
   758   // G1. For example, the code that looks for a consecutive number
   759   // of empty regions will consider them empty and try to
   760   // re-allocate them. We can extend is_empty() to also include
   761   // !continuesHumongous(), but it is easier to just update the top
   762   // fields here. The way we set top for all regions (i.e., top ==
   763   // end for all regions but the last one, top == new_top for the
   764   // last one) is actually used when we will free up the humongous
   765   // region in free_humongous_region().
   766   hr = NULL;
   767   for (uint i = first + 1; i < last; ++i) {
   768     hr = region_at(i);
   769     if ((i + 1) == last) {
   770       // last continues humongous region
   771       assert(hr->bottom() < new_top && new_top <= hr->end(),
   772              "new_top should fall on this region");
   773       hr->set_top(new_top);
   774       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
   775     } else {
   776       // not last one
   777       assert(new_top > hr->end(), "new_top should be above this region");
   778       hr->set_top(hr->end());
   779       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
   780     }
   781   }
   782   // If we have continues humongous regions (hr != NULL), then the
   783   // end of the last one should match new_end and its top should
   784   // match new_top.
   785   assert(hr == NULL ||
   786          (hr->end() == new_end && hr->top() == new_top), "sanity");
   788   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
   789   _summary_bytes_used += first_hr->used();
   790   _humongous_set.add(first_hr);
   792   return new_obj;
   793 }
   795 // If could fit into free regions w/o expansion, try.
   796 // Otherwise, if can expand, do so.
   797 // Otherwise, if using ex regions might help, try with ex given back.
   798 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   799   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   801   verify_region_sets_optional();
   803   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
   804   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
   805   uint x_num = expansion_regions();
   806   uint fs = _hrs.free_suffix();
   807   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
   808   if (first == G1_NULL_HRS_INDEX) {
   809     // The only thing we can do now is attempt expansion.
   810     if (fs + x_num >= num_regions) {
   811       // If the number of regions we're trying to allocate for this
   812       // object is at most the number of regions in the free suffix,
   813       // then the call to humongous_obj_allocate_find_first() above
   814       // should have succeeded and we wouldn't be here.
   815       //
   816       // We should only be trying to expand when the free suffix is
   817       // not sufficient for the object _and_ we have some expansion
   818       // room available.
   819       assert(num_regions > fs, "earlier allocation should have succeeded");
   821       ergo_verbose1(ErgoHeapSizing,
   822                     "attempt heap expansion",
   823                     ergo_format_reason("humongous allocation request failed")
   824                     ergo_format_byte("allocation request"),
   825                     word_size * HeapWordSize);
   826       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
   827         // Even though the heap was expanded, it might not have
   828         // reached the desired size. So, we cannot assume that the
   829         // allocation will succeed.
   830         first = humongous_obj_allocate_find_first(num_regions, word_size);
   831       }
   832     }
   833   }
   835   HeapWord* result = NULL;
   836   if (first != G1_NULL_HRS_INDEX) {
   837     result =
   838       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
   839     assert(result != NULL, "it should always return a valid result");
   841     // A successful humongous object allocation changes the used space
   842     // information of the old generation so we need to recalculate the
   843     // sizes and update the jstat counters here.
   844     g1mm()->update_sizes();
   845   }
   847   verify_region_sets_optional();
   849   return result;
   850 }
   852 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   853   assert_heap_not_locked_and_not_at_safepoint();
   854   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
   856   unsigned int dummy_gc_count_before;
   857   return attempt_allocation(word_size, &dummy_gc_count_before);
   858 }
   860 HeapWord*
   861 G1CollectedHeap::mem_allocate(size_t word_size,
   862                               bool*  gc_overhead_limit_was_exceeded) {
   863   assert_heap_not_locked_and_not_at_safepoint();
    865   // Loop until the allocation is satisfied, or unsatisfied after GC.
   866   for (int try_count = 1; /* we'll return */; try_count += 1) {
   867     unsigned int gc_count_before;
   869     HeapWord* result = NULL;
   870     if (!isHumongous(word_size)) {
   871       result = attempt_allocation(word_size, &gc_count_before);
   872     } else {
   873       result = attempt_allocation_humongous(word_size, &gc_count_before);
   874     }
   875     if (result != NULL) {
   876       return result;
   877     }
   879     // Create the garbage collection operation...
   880     VM_G1CollectForAllocation op(gc_count_before, word_size);
   881     // ...and get the VM thread to execute it.
   882     VMThread::execute(&op);
   884     if (op.prologue_succeeded() && op.pause_succeeded()) {
   885       // If the operation was successful we'll return the result even
   886       // if it is NULL. If the allocation attempt failed immediately
   887       // after a Full GC, it's unlikely we'll be able to allocate now.
   888       HeapWord* result = op.result();
   889       if (result != NULL && !isHumongous(word_size)) {
   890         // Allocations that take place on VM operations do not do any
   891         // card dirtying and we have to do it here. We only have to do
   892         // this for non-humongous allocations, though.
   893         dirty_young_block(result, word_size);
   894       }
   895       return result;
   896     } else {
   897       assert(op.result() == NULL,
   898              "the result should be NULL if the VM op did not succeed");
   899     }
   901     // Give a warning if we seem to be looping forever.
   902     if ((QueuedAllocationWarningCount > 0) &&
   903         (try_count % QueuedAllocationWarningCount == 0)) {
   904       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
   905     }
   906   }
   908   ShouldNotReachHere();
   909   return NULL;
   910 }
   912 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   913                                            unsigned int *gc_count_before_ret) {
   914   // Make sure you read the note in attempt_allocation_humongous().
   916   assert_heap_not_locked_and_not_at_safepoint();
   917   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
   918          "be called for humongous allocation requests");
   920   // We should only get here after the first-level allocation attempt
   921   // (attempt_allocation()) failed to allocate.
   923   // We will loop until a) we manage to successfully perform the
   924   // allocation or b) we successfully schedule a collection which
   925   // fails to perform the allocation. b) is the only case when we'll
   926   // return NULL.
   927   HeapWord* result = NULL;
   928   for (int try_count = 1; /* we'll return */; try_count += 1) {
   929     bool should_try_gc;
   930     unsigned int gc_count_before;
   932     {
   933       MutexLockerEx x(Heap_lock);
   935       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
   936                                                       false /* bot_updates */);
   937       if (result != NULL) {
   938         return result;
   939       }
   941       // If we reach here, attempt_allocation_locked() above failed to
   942       // allocate a new region. So the mutator alloc region should be NULL.
   943       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
   945       if (GC_locker::is_active_and_needs_gc()) {
   946         if (g1_policy()->can_expand_young_list()) {
   947           // No need for an ergo verbose message here,
   948           // can_expand_young_list() does this when it returns true.
   949           result = _mutator_alloc_region.attempt_allocation_force(word_size,
   950                                                       false /* bot_updates */);
   951           if (result != NULL) {
   952             return result;
   953           }
   954         }
   955         should_try_gc = false;
   956       } else {
   957         // The GCLocker may not be active but the GCLocker initiated
   958         // GC may not yet have been performed (GCLocker::needs_gc()
   959         // returns true). In this case we do not try this GC and
   960         // wait until the GCLocker initiated GC is performed, and
   961         // then retry the allocation.
   962         if (GC_locker::needs_gc()) {
   963           should_try_gc = false;
   964         } else {
   965           // Read the GC count while still holding the Heap_lock.
   966           gc_count_before = total_collections();
   967           should_try_gc = true;
   968         }
   969       }
   970     }
   972     if (should_try_gc) {
   973       bool succeeded;
   974       result = do_collection_pause(word_size, gc_count_before, &succeeded);
   975       if (result != NULL) {
   976         assert(succeeded, "only way to get back a non-NULL result");
   977         return result;
   978       }
   980       if (succeeded) {
   981         // If we get here we successfully scheduled a collection which
   982         // failed to allocate. No point in trying to allocate
   983         // further. We'll just return NULL.
   984         MutexLockerEx x(Heap_lock);
   985         *gc_count_before_ret = total_collections();
   986         return NULL;
   987       }
   988     } else {
   989       // The GCLocker is either active or the GCLocker initiated
   990       // GC has not yet been performed. Stall until it is and
   991       // then retry the allocation.
   992       GC_locker::stall_until_clear();
   993     }
    995     // We can reach here if we were unsuccessful in scheduling a
    996     // collection (because another thread beat us to it) or if we were
    997     // stalled due to the GC locker. In either case we should retry the
   998     // allocation attempt in case another thread successfully
   999     // performed a collection and reclaimed enough space. We do the
  1000     // first attempt (without holding the Heap_lock) here and the
  1001     // follow-on attempt will be at the start of the next loop
  1002     // iteration (after taking the Heap_lock).
  1003     result = _mutator_alloc_region.attempt_allocation(word_size,
  1004                                                       false /* bot_updates */);
  1005     if (result != NULL) {
   1006       return result;
   1007     }
  1009     // Give a warning if we seem to be looping forever.
  1010     if ((QueuedAllocationWarningCount > 0) &&
  1011         (try_count % QueuedAllocationWarningCount == 0)) {
  1012       warning("G1CollectedHeap::attempt_allocation_slow() "
   1013               "retries %d times", try_count);
   1014     }
   1015   }
  1017   ShouldNotReachHere();
   1018   return NULL;
   1019 }
  1021 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
  1022                                           unsigned int * gc_count_before_ret) {
  1023   // The structure of this method has a lot of similarities to
  1024   // attempt_allocation_slow(). The reason these two were not merged
  1025   // into a single one is that such a method would require several "if
  1026   // allocation is not humongous do this, otherwise do that"
  1027   // conditional paths which would obscure its flow. In fact, an early
  1028   // version of this code did use a unified method which was harder to
  1029   // follow and, as a result, it had subtle bugs that were hard to
  1030   // track down. So keeping these two methods separate allows each to
  1031   // be more readable. It will be good to keep these two in sync as
  1032   // much as possible.
  1034   assert_heap_not_locked_and_not_at_safepoint();
  1035   assert(isHumongous(word_size), "attempt_allocation_humongous() "
  1036          "should only be called for humongous allocations");
  1038   // Humongous objects can exhaust the heap quickly, so we should check if we
  1039   // need to start a marking cycle at each humongous object allocation. We do
  1040   // the check before we do the actual allocation. The reason for doing it
  1041   // before the allocation is that we avoid having to keep track of the newly
  1042   // allocated memory while we do a GC.
  1043   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
  1044                                            word_size)) {
   1045     collect(GCCause::_g1_humongous_allocation);
   1046   }
  1048   // We will loop until a) we manage to successfully perform the
  1049   // allocation or b) we successfully schedule a collection which
  1050   // fails to perform the allocation. b) is the only case when we'll
  1051   // return NULL.
  1052   HeapWord* result = NULL;
  1053   for (int try_count = 1; /* we'll return */; try_count += 1) {
  1054     bool should_try_gc;
   1055     unsigned int gc_count_before;
   1057     {
  1058       MutexLockerEx x(Heap_lock);
  1060       // Given that humongous objects are not allocated in young
  1061       // regions, we'll first try to do the allocation without doing a
  1062       // collection hoping that there's enough space in the heap.
  1063       result = humongous_obj_allocate(word_size);
  1064       if (result != NULL) {
   1065         return result;
   1066       }
  1068       if (GC_locker::is_active_and_needs_gc()) {
  1069         should_try_gc = false;
  1070       } else {
  1071          // The GCLocker may not be active but the GCLocker initiated
  1072         // GC may not yet have been performed (GCLocker::needs_gc()
  1073         // returns true). In this case we do not try this GC and
  1074         // wait until the GCLocker initiated GC is performed, and
  1075         // then retry the allocation.
  1076         if (GC_locker::needs_gc()) {
  1077           should_try_gc = false;
  1078         } else {
  1079           // Read the GC count while still holding the Heap_lock.
  1080           gc_count_before = total_collections();
   1081           should_try_gc = true;
   1082         }
   1083       }
   1084     }
  1086     if (should_try_gc) {
  1087       // If we failed to allocate the humongous object, we should try to
  1088       // do a collection pause (if we're allowed) in case it reclaims
  1089       // enough space for the allocation to succeed after the pause.
  1091       bool succeeded;
  1092       result = do_collection_pause(word_size, gc_count_before, &succeeded);
  1093       if (result != NULL) {
  1094         assert(succeeded, "only way to get back a non-NULL result");
   1095         return result;
   1096       }
  1098       if (succeeded) {
  1099         // If we get here we successfully scheduled a collection which
  1100         // failed to allocate. No point in trying to allocate
  1101         // further. We'll just return NULL.
  1102         MutexLockerEx x(Heap_lock);
  1103         *gc_count_before_ret = total_collections();
   1104         return NULL;
   1105       }
  1106     } else {
  1107       // The GCLocker is either active or the GCLocker initiated
  1108       // GC has not yet been performed. Stall until it is and
  1109       // then retry the allocation.
   1110       GC_locker::stall_until_clear();
   1111     }
   1113     // We can reach here if we were unsuccessful in scheduling a
   1114     // collection (because another thread beat us to it) or if we were
   1115     // stalled due to the GC locker. In either case we should retry the
  1116     // allocation attempt in case another thread successfully
  1117     // performed a collection and reclaimed enough space.  Give a
  1118     // warning if we seem to be looping forever.
  1120     if ((QueuedAllocationWarningCount > 0) &&
  1121         (try_count % QueuedAllocationWarningCount == 0)) {
  1122       warning("G1CollectedHeap::attempt_allocation_humongous() "
   1123               "retries %d times", try_count);
   1124     }
   1125   }
  1127   ShouldNotReachHere();
   1128   return NULL;
   1129 }
  1131 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
  1132                                        bool expect_null_mutator_alloc_region) {
  1133   assert_at_safepoint(true /* should_be_vm_thread */);
  1134   assert(_mutator_alloc_region.get() == NULL ||
  1135                                              !expect_null_mutator_alloc_region,
  1136          "the current alloc region was unexpectedly found to be non-NULL");
  1138   if (!isHumongous(word_size)) {
  1139     return _mutator_alloc_region.attempt_allocation_locked(word_size,
  1140                                                       false /* bot_updates */);
  1141   } else {
  1142     HeapWord* result = humongous_obj_allocate(word_size);
  1143     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
   1144       g1_policy()->set_initiate_conc_mark_if_possible();
   1145     }
   1146     return result;
   1147   }
   1149   ShouldNotReachHere();
   1150 }
  1152 class PostMCRemSetClearClosure: public HeapRegionClosure {
  1153   G1CollectedHeap* _g1h;
  1154   ModRefBarrierSet* _mr_bs;
  1155 public:
  1156   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
  1157     _g1h(g1h), _mr_bs(mr_bs) { }
  1158   bool doHeapRegion(HeapRegion* r) {
  1159     if (r->continuesHumongous()) {
   1160       return false;
   1161     }
  1162     _g1h->reset_gc_time_stamps(r);
  1163     HeapRegionRemSet* hrrs = r->rem_set();
  1164     if (hrrs != NULL) hrrs->clear();
  1165     // You might think here that we could clear just the cards
  1166     // corresponding to the used region.  But no: if we leave a dirty card
  1167     // in a region we might allocate into, then it would prevent that card
  1168     // from being enqueued, and cause it to be missed.
  1169     // Re: the performance cost: we shouldn't be doing full GC anyway!
  1170     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
   1171     return false;
   1172   }
  1173 };
  1175 void G1CollectedHeap::clear_rsets_post_compaction() {
  1176   PostMCRemSetClearClosure rs_clear(this, mr_bs());
   1177   heap_region_iterate(&rs_clear);
   1178 }
  1180 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  1181   G1CollectedHeap*   _g1h;
  1182   UpdateRSOopClosure _cl;
  1183   int                _worker_i;
  1184 public:
  1185   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
  1186     _cl(g1->g1_rem_set(), worker_i),
  1187     _worker_i(worker_i),
  1188     _g1h(g1)
  1189   { }
  1191   bool doHeapRegion(HeapRegion* r) {
  1192     if (!r->continuesHumongous()) {
  1193       _cl.set_from(r);
   1194       r->oop_iterate(&_cl);
   1195     }
   1196     return false;
   1197   }
  1198 };
  1200 class ParRebuildRSTask: public AbstractGangTask {
  1201   G1CollectedHeap* _g1;
  1202 public:
  1203   ParRebuildRSTask(G1CollectedHeap* g1)
  1204     : AbstractGangTask("ParRebuildRSTask"),
  1205       _g1(g1)
  1206   { }
  1208   void work(uint worker_id) {
  1209     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
  1210     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
  1211                                           _g1->workers()->active_workers(),
   1212                                          HeapRegion::RebuildRSClaimValue);
   1213   }
  1214 };
  1216 class PostCompactionPrinterClosure: public HeapRegionClosure {
  1217 private:
  1218   G1HRPrinter* _hr_printer;
  1219 public:
  1220   bool doHeapRegion(HeapRegion* hr) {
  1221     assert(!hr->is_young(), "not expecting to find young regions");
  1222     // We only generate output for non-empty regions.
  1223     if (!hr->is_empty()) {
  1224       if (!hr->isHumongous()) {
  1225         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
  1226       } else if (hr->startsHumongous()) {
  1227         if (hr->region_num() == 1) {
  1228           // single humongous region
  1229           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
  1230         } else {
   1231           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
   1232         }
  1233       } else {
  1234         assert(hr->continuesHumongous(), "only way to get here");
   1235         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
   1236       }
   1237     }
   1238     return false;
   1239   }
  1241   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
  1242     : _hr_printer(hr_printer) { }
  1243 };
  1245 void G1CollectedHeap::print_hrs_post_compaction() {
  1246   PostCompactionPrinterClosure cl(hr_printer());
   1247   heap_region_iterate(&cl);
   1248 }
  1250 double G1CollectedHeap::verify(bool guard, const char* msg) {
  1251   double verify_time_ms = 0.0;
  1253   if (guard && total_collections() >= VerifyGCStartAt) {
  1254     double verify_start = os::elapsedTime();
  1255     HandleMark hm;  // Discard invalid handles created during verification
  1256     gclog_or_tty->print(msg);
  1257     prepare_for_verify();
  1258     Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
   1259     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
   1260   }
   1262   return verify_time_ms;
   1263 }
  1265 void G1CollectedHeap::verify_before_gc() {
  1266   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
   1267   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
   1268 }
  1270 void G1CollectedHeap::verify_after_gc() {
  1271   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
   1272   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
   1273 }
  1275 bool G1CollectedHeap::do_collection(bool explicit_gc,
  1276                                     bool clear_all_soft_refs,
  1277                                     size_t word_size) {
  1278   assert_at_safepoint(true /* should_be_vm_thread */);
  1280   if (GC_locker::check_active_before_gc()) {
   1281     return false;
   1282   }
  1284   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1285   ResourceMark rm;
  1287   print_heap_before_gc();
  1289   size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
  1291   HRSPhaseSetter x(HRSPhaseFullGC);
  1292   verify_region_sets_optional();
  1294   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1295                            collector_policy()->should_clear_all_soft_refs();
   1297   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
   1299   {
  1300     IsGCActiveMark x;
  1302     // Timing
  1303     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
  1304     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  1305     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  1307     TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
  1308     TraceCollectorStats tcs(g1mm()->full_collection_counters());
  1309     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  1311     double start = os::elapsedTime();
  1312     g1_policy()->record_full_collection_start();
  1314     // Note: When we have a more flexible GC logging framework that
  1315     // allows us to add optional attributes to a GC log record we
  1316     // could consider timing and reporting how long we wait in the
  1317     // following two methods.
  1318     wait_while_free_regions_coming();
  1319     // If we start the compaction before the CM threads finish
  1320     // scanning the root regions we might trip them over as we'll
  1321     // be moving objects / updating references. So let's wait until
  1322     // they are done. By telling them to abort, they should complete
  1323     // early.
  1324     _cm->root_regions()->abort();
  1325     _cm->root_regions()->wait_until_scan_finished();
  1326     append_secondary_free_list_if_not_empty_with_lock();
  1328     gc_prologue(true);
  1329     increment_total_collections(true /* full gc */);
  1330     increment_old_marking_cycles_started();
  1332     size_t g1h_prev_used = used();
  1333     assert(used() == recalculate_used(), "Should be equal");
  1335     verify_before_gc();
  1337     pre_full_gc_dump();
  1339     COMPILER2_PRESENT(DerivedPointerTable::clear());
  1341     // Disable discovery and empty the discovered lists
  1342     // for the CM ref processor.
  1343     ref_processor_cm()->disable_discovery();
  1344     ref_processor_cm()->abandon_partial_discovery();
  1345     ref_processor_cm()->verify_no_references_recorded();
  1347     // Abandon current iterations of concurrent marking and concurrent
  1348     // refinement, if any are in progress. We have to do this before
  1349     // wait_until_scan_finished() below.
  1350     concurrent_mark()->abort();
  1352     // Make sure we'll choose a new allocation region afterwards.
  1353     release_mutator_alloc_region();
  1354     abandon_gc_alloc_regions();
  1355     g1_rem_set()->cleanupHRRS();
  1357     // We should call this after we retire any currently active alloc
  1358     // regions so that all the ALLOC / RETIRE events are generated
  1359     // before the start GC event.
  1360     _hr_printer.start_gc(true /* full */, (size_t) total_collections());
  1362     // We may have added regions to the current incremental collection
  1363     // set between the last GC or pause and now. We need to clear the
  1364     // incremental collection set and then start rebuilding it afresh
  1365     // after this full GC.
  1366     abandon_collection_set(g1_policy()->inc_cset_head());
  1367     g1_policy()->clear_incremental_cset();
  1368     g1_policy()->stop_incremental_cset_building();
  1370     tear_down_region_sets(false /* free_list_only */);
  1371     g1_policy()->set_gcs_are_young(true);
  1373     // See the comments in g1CollectedHeap.hpp and
  1374     // G1CollectedHeap::ref_processing_init() about
  1375     // how reference processing currently works in G1.
  1377     // Temporarily make discovery by the STW ref processor single threaded (non-MT).
  1378     ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
  1380     // Temporarily clear the STW ref processor's _is_alive_non_header field.
  1381     ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
  1383     ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  1384     ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
  1386     // Do collection work
  1388       HandleMark hm;  // Discard invalid handles created during gc
  1389       G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
  1392     assert(free_regions() == 0, "we should not have added any free regions");
  1393     rebuild_region_sets(false /* free_list_only */);
  1395     // Enqueue any discovered reference objects that have
  1396     // not been removed from the discovered lists.
  1397     ref_processor_stw()->enqueue_discovered_references();
  1399     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1401     MemoryService::track_memory_usage();
  1403     verify_after_gc();
  1405     assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  1406     ref_processor_stw()->verify_no_references_recorded();
  1408     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  1409     ClassLoaderDataGraph::purge();
  1411     // Note: since we've just done a full GC, concurrent
  1412     // marking is no longer active. Therefore we need not
  1413     // re-enable reference discovery for the CM ref processor.
  1414     // That will be done at the start of the next marking cycle.
  1415     assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  1416     ref_processor_cm()->verify_no_references_recorded();
  1418     reset_gc_time_stamp();
  1419     // Since everything potentially moved, we will clear all remembered
  1420     // sets, and clear all cards.  Later we will rebuild remembered
  1421     // sets. We will also reset the GC time stamps of the regions.
  1422     clear_rsets_post_compaction();
  1423     check_gc_time_stamps();
  1425     // Resize the heap if necessary.
  1426     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1428     if (_hr_printer.is_active()) {
  1429       // We should do this after we potentially resize the heap so
  1430       // that all the COMMIT / UNCOMMIT events are generated before
  1431       // the end GC event.
  1433       print_hrs_post_compaction();
  1434       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  1437     if (_cg1r->use_cache()) {
  1438       _cg1r->clear_and_record_card_counts();
  1439       _cg1r->clear_hot_cache();
  1442     // Rebuild remembered sets of all regions.
  1443     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1444       uint n_workers =
  1445         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  1446                                        workers()->active_workers(),
  1447                                        Threads::number_of_non_daemon_threads());
  1448       assert(UseDynamicNumberOfGCThreads ||
  1449              n_workers == workers()->total_workers(),
  1450              "If not dynamic should be using all the workers");
  1451       workers()->set_active_workers(n_workers);
  1452       // Set parallel threads in the heap (_n_par_threads) only
  1453       // before a parallel phase and always reset it to 0 after
  1454       // the phase so that the number of parallel threads does
  1455       // no get carried forward to a serial phase where there
  1456     // not get carried forward to a serial phase where there
  1457       set_par_threads(n_workers);
  1459       ParRebuildRSTask rebuild_rs_task(this);
  1460       assert(check_heap_region_claim_values(
  1461              HeapRegion::InitialClaimValue), "sanity check");
  1462       assert(UseDynamicNumberOfGCThreads ||
  1463              workers()->active_workers() == workers()->total_workers(),
  1464         "Unless dynamic should use total workers");
  1465       // Use the most recent number of active workers
  1466       assert(workers()->active_workers() > 0,
  1467         "Active workers not properly set");
  1468       set_par_threads(workers()->active_workers());
  1469       workers()->run_task(&rebuild_rs_task);
  1470       set_par_threads(0);
  1471       assert(check_heap_region_claim_values(
  1472              HeapRegion::RebuildRSClaimValue), "sanity check");
  1473       reset_heap_region_claim_values();
  1474     } else {
  1475       RebuildRSOutOfRegionClosure rebuild_rs(this);
  1476       heap_region_iterate(&rebuild_rs);
  1479     if (G1Log::fine()) {
  1480       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  1483     if (true) { // FIXME
  1484       MetaspaceGC::compute_new_size();
  1487     // Start a new incremental collection set for the next pause
  1488     assert(g1_policy()->collection_set() == NULL, "must be");
  1489     g1_policy()->start_incremental_cset_building();
  1491     // Clear the _cset_fast_test bitmap in anticipation of adding
  1492     // regions to the incremental collection set for the next
  1493     // evacuation pause.
  1494     clear_cset_fast_test();
  1496     init_mutator_alloc_region();
  1498     double end = os::elapsedTime();
  1499     g1_policy()->record_full_collection_end();
  1501 #ifdef TRACESPINNING
  1502     ParallelTaskTerminator::print_termination_counts();
  1503 #endif
  1505     gc_epilogue(true);
  1507     // Discard all rset updates
  1508     JavaThread::dirty_card_queue_set().abandon_logs();
  1509     assert(!G1DeferredRSUpdate
  1510            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1512     _young_list->reset_sampled_info();
  1513     // At this point there should be no regions in the
  1514     // entire heap tagged as young.
  1515     assert( check_young_list_empty(true /* check_heap */),
  1516       "young list should be empty at this point");
  1518     // Update the number of full collections that have been completed.
  1519     increment_old_marking_cycles_completed(false /* concurrent */);
  1521     _hrs.verify_optional();
  1522     verify_region_sets_optional();
  1524     print_heap_after_gc();
  1526     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  1527     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  1528     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  1529     // before any GC notifications are raised.
  1530     g1mm()->update_sizes();
  1533   post_full_gc_dump();
  1535   return true;
  1538 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1539   // do_collection() will return whether it succeeded in performing
  1540   // the GC. Currently, there is no facility on the
  1541   // do_full_collection() API to notify the caller that the collection
  1542   // did not succeed (e.g., because it was locked out by the GC
  1543   // locker). So, right now, we'll ignore the return value.
  1544   bool dummy = do_collection(true,                /* explicit_gc */
  1545                              clear_all_soft_refs,
  1546                              0                    /* word_size */);
  1549 // This code is mostly copied from TenuredGeneration.
  1550 void
  1551 G1CollectedHeap::
  1552 resize_if_necessary_after_full_collection(size_t word_size) {
  1553   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1555   // Include the current allocation, if any, and bytes that will be
  1556   // pre-allocated to support collections, as "used".
  1557   const size_t used_after_gc = used();
  1558   const size_t capacity_after_gc = capacity();
  1559   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1561   // This is enforced in arguments.cpp.
  1562   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1563          "otherwise the code below doesn't make sense");
  1565   // We don't have floating point command-line arguments
  1566   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1567   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1568   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1569   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
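         // For example, with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
         // maximum_used_percentage is 0.60 and minimum_used_percentage is 0.30,
         // so the minimum desired capacity computed below works out to
         // used_after_gc / 0.60 and the maximum desired capacity to
         // used_after_gc / 0.30.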
  1571   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1572   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1574   // We have to be careful here as these two calculations can overflow
  1575   // 32-bit size_t's.
  1576   double used_after_gc_d = (double) used_after_gc;
  1577   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1578   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
  1580   // Let's make sure that they are both under the max heap size, which
  1581   // by default will make them fit into a size_t.
  1582   double desired_capacity_upper_bound = (double) max_heap_size;
  1583   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1584                                     desired_capacity_upper_bound);
  1585   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1586                                     desired_capacity_upper_bound);
  1588   // We can now safely turn them into size_t's.
  1589   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1590   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
  1592   // This assert only makes sense here, before we adjust them
  1593   // with respect to the min and max heap size.
  1594   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1595          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1596                  "maximum_desired_capacity = "SIZE_FORMAT,
  1597                  minimum_desired_capacity, maximum_desired_capacity));
  1599   // Should not be greater than the heap max size. No need to adjust
  1600   // it with respect to the heap min size as it's a lower bound (i.e.,
  1601   // we'll try to make the capacity larger than it, not smaller).
  1602   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1603   // Should not be less than the heap min size. No need to adjust it
  1604   // with respect to the heap max size as it's an upper bound (i.e.,
  1605   // we'll try to make the capacity smaller than it, not greater).
  1606   maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
  1608   if (capacity_after_gc < minimum_desired_capacity) {
  1609     // Don't expand unless it's significant
  1610     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1611     ergo_verbose4(ErgoHeapSizing,
  1612                   "attempt heap expansion",
  1613                   ergo_format_reason("capacity lower than "
  1614                                      "min desired capacity after Full GC")
  1615                   ergo_format_byte("capacity")
  1616                   ergo_format_byte("occupancy")
  1617                   ergo_format_byte_perc("min desired capacity"),
  1618                   capacity_after_gc, used_after_gc,
  1619                   minimum_desired_capacity, (double) MinHeapFreeRatio);
  1620     expand(expand_bytes);
  1622     // No expansion, now see if we want to shrink
  1623   } else if (capacity_after_gc > maximum_desired_capacity) {
  1624     // Capacity too large, compute shrinking size
  1625     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1626     ergo_verbose4(ErgoHeapSizing,
  1627                   "attempt heap shrinking",
  1628                   ergo_format_reason("capacity higher than "
  1629                                      "max desired capacity after Full GC")
  1630                   ergo_format_byte("capacity")
  1631                   ergo_format_byte("occupancy")
  1632                   ergo_format_byte_perc("max desired capacity"),
  1633                   capacity_after_gc, used_after_gc,
  1634                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
  1635     shrink(shrink_bytes);
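       // Called at a safepoint after an allocation has failed. Tries, in
       // order: another allocation attempt, heap expansion plus allocation,
       // a Full GC plus allocation, and finally a Full GC that clears all
       // soft references plus a last allocation attempt. Sets *succeeded to
       // false only if a required Full GC could not be performed (e.g.
       // because the GC locker was active).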
  1640 HeapWord*
  1641 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1642                                            bool* succeeded) {
  1643   assert_at_safepoint(true /* should_be_vm_thread */);
  1645   *succeeded = true;
  1646   // Let's attempt the allocation first.
  1647   HeapWord* result =
  1648     attempt_allocation_at_safepoint(word_size,
  1649                                  false /* expect_null_mutator_alloc_region */);
  1650   if (result != NULL) {
  1651     assert(*succeeded, "sanity");
  1652     return result;
  1655   // In a G1 heap, we're supposed to keep allocation from failing by
  1656   // incremental pauses.  Therefore, at least for now, we'll favor
  1657   // expansion over collection.  (This might change in the future if we can
  1658   // do something smarter than full collection to satisfy a failed alloc.)
  1659   result = expand_and_allocate(word_size);
  1660   if (result != NULL) {
  1661     assert(*succeeded, "sanity");
  1662     return result;
  1665   // Expansion didn't work, we'll try to do a Full GC.
  1666   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1667                                     false, /* clear_all_soft_refs */
  1668                                     word_size);
  1669   if (!gc_succeeded) {
  1670     *succeeded = false;
  1671     return NULL;
  1674   // Retry the allocation
  1675   result = attempt_allocation_at_safepoint(word_size,
  1676                                   true /* expect_null_mutator_alloc_region */);
  1677   if (result != NULL) {
  1678     assert(*succeeded, "sanity");
  1679     return result;
  1682   // Then, try a Full GC that will collect all soft references.
  1683   gc_succeeded = do_collection(false, /* explicit_gc */
  1684                                true,  /* clear_all_soft_refs */
  1685                                word_size);
  1686   if (!gc_succeeded) {
  1687     *succeeded = false;
  1688     return NULL;
  1691   // Retry the allocation once more
  1692   result = attempt_allocation_at_safepoint(word_size,
  1693                                   true /* expect_null_mutator_alloc_region */);
  1694   if (result != NULL) {
  1695     assert(*succeeded, "sanity");
  1696     return result;
  1699   assert(!collector_policy()->should_clear_all_soft_refs(),
  1700          "Flag should have been handled and cleared prior to this point");
  1702   // What else?  We might try synchronous finalization later.  If the total
  1703   // space available is large enough for the allocation, then a more
  1704   // complete compaction phase than we've tried so far might be
  1705   // appropriate.
  1706   assert(*succeeded, "sanity");
  1707   return NULL;
  1710 // Attempts to expand the heap sufficiently
  1711 // to support an allocation of the given "word_size".  If
  1712 // successful, performs the allocation and returns the address of the
  1713 // allocated block, or else "NULL".
  1715 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1716   assert_at_safepoint(true /* should_be_vm_thread */);
  1718   verify_region_sets_optional();
  1720   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
  1721   ergo_verbose1(ErgoHeapSizing,
  1722                 "attempt heap expansion",
  1723                 ergo_format_reason("allocation request failed")
  1724                 ergo_format_byte("allocation request"),
  1725                 word_size * HeapWordSize);
  1726   if (expand(expand_bytes)) {
  1727     _hrs.verify_optional();
  1728     verify_region_sets_optional();
  1729     return attempt_allocation_at_safepoint(word_size,
  1730                                  false /* expect_null_mutator_alloc_region */);
  1732   return NULL;
  1735 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
  1736                                              HeapWord* new_end) {
  1737   assert(old_end != new_end, "don't call this otherwise");
  1738   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
  1740   // Update the committed mem region.
  1741   _g1_committed.set_end(new_end);
  1742   // Tell the card table about the update.
  1743   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1744   // Tell the BOT about the update.
  1745   _bot_shared->resize(_g1_committed.word_size());
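       // Commits up to expand_bytes of the reserved but uncommitted space,
       // rounded up to whole heap regions, allocates the corresponding
       // HeapRegion instances and adds them to the master free list.
       // Returns whether committing the underlying virtual space succeeded.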
  1748 bool G1CollectedHeap::expand(size_t expand_bytes) {
  1749   size_t old_mem_size = _g1_storage.committed_size();
  1750   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  1751   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1752                                        HeapRegion::GrainBytes);
  1753   ergo_verbose2(ErgoHeapSizing,
  1754                 "expand the heap",
  1755                 ergo_format_byte("requested expansion amount")
  1756                 ergo_format_byte("attempted expansion amount"),
  1757                 expand_bytes, aligned_expand_bytes);
  1759   // First commit the memory.
  1760   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1761   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  1762   if (successful) {
  1763     // Then propagate this update to the necessary data structures.
  1764     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1765     update_committed_space(old_end, new_end);
  1767     FreeRegionList expansion_list("Local Expansion List");
  1768     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
  1769     assert(mr.start() == old_end, "post-condition");
  1770     // mr might be a smaller region than what was requested if
  1771     // expand_by() was unable to allocate the HeapRegion instances
  1772     assert(mr.end() <= new_end, "post-condition");
  1774     size_t actual_expand_bytes = mr.byte_size();
  1775     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
  1776     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
  1777            "post-condition");
  1778     if (actual_expand_bytes < aligned_expand_bytes) {
  1779       // We could not expand _hrs to the desired size. In this case we
  1780       // need to shrink the committed space accordingly.
  1781       assert(mr.end() < new_end, "invariant");
  1783       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
  1784       // First uncommit the memory.
  1785       _g1_storage.shrink_by(diff_bytes);
  1786       // Then propagate this update to the necessary data structures.
  1787       update_committed_space(new_end, mr.end());
  1789     _free_list.add_as_tail(&expansion_list);
  1791     if (_hr_printer.is_active()) {
  1792       HeapWord* curr = mr.start();
  1793       while (curr < mr.end()) {
  1794         HeapWord* curr_end = curr + HeapRegion::GrainWords;
  1795         _hr_printer.commit(curr, curr_end);
  1796         curr = curr_end;
  1798       assert(curr == mr.end(), "post-condition");
  1800     g1_policy()->record_new_heap_size(n_regions());
  1801   } else {
  1802     ergo_verbose0(ErgoHeapSizing,
  1803                   "did not expand the heap",
  1804                   ergo_format_reason("heap expansion operation failed"));
  1805     // The expansion of the virtual storage space was unsuccessful.
  1806     // Let's see if it was because we ran out of swap.
  1807     if (G1ExitOnExpansionFailure &&
  1808         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
  1809       // We had head room...
  1810       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
  1813   return successful;
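       // Uncommits up to shrink_bytes from the end of the committed space,
       // rounded down to whole heap regions, and propagates the new
       // committed size to the card table, the BOT and the remembered set
       // structures.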
  1816 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  1817   size_t old_mem_size = _g1_storage.committed_size();
  1818   size_t aligned_shrink_bytes =
  1819     ReservedSpace::page_align_size_down(shrink_bytes);
  1820   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1821                                          HeapRegion::GrainBytes);
  1822   uint num_regions_deleted = 0;
  1823   MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
  1824   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1825   assert(mr.end() == old_end, "post-condition");
  1827   ergo_verbose3(ErgoHeapSizing,
  1828                 "shrink the heap",
  1829                 ergo_format_byte("requested shrinking amount")
  1830                 ergo_format_byte("aligned shrinking amount")
  1831                 ergo_format_byte("attempted shrinking amount"),
  1832                 shrink_bytes, aligned_shrink_bytes, mr.byte_size());
  1833   if (mr.byte_size() > 0) {
  1834     if (_hr_printer.is_active()) {
  1835       HeapWord* curr = mr.end();
  1836       while (curr > mr.start()) {
  1837         HeapWord* curr_end = curr;
  1838         curr -= HeapRegion::GrainWords;
  1839         _hr_printer.uncommit(curr, curr_end);
  1841       assert(curr == mr.start(), "post-condition");
  1844     _g1_storage.shrink_by(mr.byte_size());
  1845     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1846     assert(mr.start() == new_end, "post-condition");
  1848     _expansion_regions += num_regions_deleted;
  1849     update_committed_space(old_end, new_end);
  1850     HeapRegionRemSet::shrink_heap(n_regions());
  1851     g1_policy()->record_new_heap_size(n_regions());
  1852   } else {
  1853     ergo_verbose0(ErgoHeapSizing,
  1854                   "did not shrink the heap",
  1855                   ergo_format_reason("heap shrinking operation failed"));
  1859 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1860   verify_region_sets_optional();
  1862   // We should only reach here at the end of a Full GC which means we
  1863   // should not be holding on to any GC alloc regions. The method
  1864   // below will make sure of that and do any remaining clean up.
  1865   abandon_gc_alloc_regions();
  1867   // Instead of tearing down / rebuilding the free lists here, we
  1868   // could use the remove_all_pending() method on free_list to
  1869   // remove only the ones that we need to remove.
  1870   tear_down_region_sets(true /* free_list_only */);
  1871   shrink_helper(shrink_bytes);
  1872   rebuild_region_sets(true /* free_list_only */);
  1874   _hrs.verify_optional();
  1875   verify_region_sets_optional();
  1878 // Public methods.
  1880 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1881 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1882 #endif // _MSC_VER
  1885 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1886   SharedHeap(policy_),
  1887   _g1_policy(policy_),
  1888   _dirty_card_queue_set(false),
  1889   _into_cset_dirty_card_queue_set(false),
  1890   _is_alive_closure_cm(this),
  1891   _is_alive_closure_stw(this),
  1892   _ref_processor_cm(NULL),
  1893   _ref_processor_stw(NULL),
  1894   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1895   _bot_shared(NULL),
  1896   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1897   _evac_failure_scan_stack(NULL) ,
  1898   _mark_in_progress(false),
  1899   _cg1r(NULL), _summary_bytes_used(0),
  1900   _g1mm(NULL),
  1901   _refine_cte_cl(NULL),
  1902   _full_collection(false),
  1903   _free_list("Master Free List"),
  1904   _secondary_free_list("Secondary Free List"),
  1905   _old_set("Old Set"),
  1906   _humongous_set("Master Humongous Set"),
  1907   _free_regions_coming(false),
  1908   _young_list(new YoungList(this)),
  1909   _gc_time_stamp(0),
  1910   _retained_old_gc_alloc_region(NULL),
  1911   _survivor_plab_stats(YoungPLABSize, PLABWeight),
  1912   _old_plab_stats(OldPLABSize, PLABWeight),
  1913   _expand_heap_after_alloc_failure(true),
  1914   _surviving_young_words(NULL),
  1915   _old_marking_cycles_started(0),
  1916   _old_marking_cycles_completed(0),
  1917   _in_cset_fast_test(NULL),
  1918   _in_cset_fast_test_base(NULL),
  1919   _dirty_cards_region_list(NULL),
  1920   _worker_cset_start_region(NULL),
  1921   _worker_cset_start_region_time_stamp(NULL) {
  1922   _g1h = this; // To catch bugs.
  1923   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1924     vm_exit_during_initialization("Failed necessary allocation.");
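         // Objects of at least half a region (GrainWords / 2) are treated
         // as humongous.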
  1927   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
  1929   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1930   _task_queues = new RefToScanQueueSet(n_queues);
  1932   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1933   assert(n_rem_sets > 0, "Invariant.");
  1935   HeapRegionRemSetIterator** iter_arr =
  1936     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
  1937   for (int i = 0; i < n_queues; i++) {
  1938     iter_arr[i] = new HeapRegionRemSetIterator();
  1940   _rem_set_iterator = iter_arr;
  1942   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  1943   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
  1945   for (int i = 0; i < n_queues; i++) {
  1946     RefToScanQueue* q = new RefToScanQueue();
  1947     q->initialize();
  1948     _task_queues->register_queue(i, q);
  1951   clear_cset_start_regions();
  1953   // Initialize the G1EvacuationFailureALot counters and flags.
  1954   NOT_PRODUCT(reset_evacuation_should_fail();)
  1956   guarantee(_task_queues != NULL, "task_queues allocation failure.");
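       // Reserves the maximum heap size, sets up the barrier set, the
       // remembered sets, the block offset table and the concurrent mark
       // data structures, and then expands the heap to its initial size.
       // Returns JNI_OK on success or JNI_ENOMEM on failure.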
  1959 jint G1CollectedHeap::initialize() {
  1960   CollectedHeap::pre_initialize();
  1961   os::enable_vtime();
  1963   G1Log::init();
  1965   // Necessary to satisfy locking discipline assertions.
  1967   MutexLocker x(Heap_lock);
  1969   // We have to initialize the printer before committing the heap, as
  1970   // it will be used then.
  1971   _hr_printer.set_active(G1PrintHeapRegions);
  1973   // While there are no constraints in the GC code that HeapWordSize
  1974   // be any particular value, there are multiple other areas in the
  1975   // system which believe this to be true (e.g. oop->object_size in some
  1976   // cases incorrectly returns the size in wordSize units rather than
  1977   // HeapWordSize).
  1978   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1980   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1981   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1983   // Ensure that the sizes are properly aligned.
  1984   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1985   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1987   _cg1r = new ConcurrentG1Refine();
  1989   // Reserve the maximum.
  1991   // When compressed oops are enabled, the preferred heap base
  1992   // is calculated by subtracting the requested size from the
  1993   // 32Gb boundary and using the result as the base address for
  1994   // heap reservation. If the requested size is not aligned to
  1995   // HeapRegion::GrainBytes (i.e. the alignment that is passed
  1996   // into the ReservedHeapSpace constructor) then the actual
  1997   // base of the reserved heap may end up differing from the
  1998   // address that was requested (i.e. the preferred heap base).
  1999   // If this happens then we could end up using a non-optimal
  2000   // compressed oops mode.
  2002   // Since max_byte_size is aligned to the size of a heap region (checked
  2003   // above).
  2004   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  2006   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
  2007                                                  HeapRegion::GrainBytes);
  2009   // It is important to do this in a way such that concurrent readers can't
  2010   // temporarily think something is in the heap.  (I've actually seen this
  2011   // happen in asserts: DLD.)
  2012   _reserved.set_word_size(0);
  2013   _reserved.set_start((HeapWord*)heap_rs.base());
  2014   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  2016   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
  2018   // Create the gen rem set (and barrier set) for the entire reserved region.
  2019   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  2020   set_barrier_set(rem_set()->bs());
  2021   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  2022     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  2023   } else {
  2024     vm_exit_during_initialization("G1 requires a mod ref bs.");
  2025     return JNI_ENOMEM;
  2028   // Also create a G1 rem set.
  2029   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  2030     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  2031   } else {
  2032     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  2033     return JNI_ENOMEM;
  2036   // Carve out the G1 part of the heap.
  2038   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  2039   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  2040                            g1_rs.size()/HeapWordSize);
  2042   _g1_storage.initialize(g1_rs, 0);
  2043   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  2044   _hrs.initialize((HeapWord*) _g1_reserved.start(),
  2045                   (HeapWord*) _g1_reserved.end(),
  2046                   _expansion_regions);
  2048   // 6843694 - ensure that the maximum region index can fit
  2049   // in the remembered set structures.
  2050   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  2051   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  2053   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  2054   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  2055   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
  2056             "too many cards per region");
  2058   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
  2060   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  2061                                              heap_word_size(init_byte_size));
  2063   _g1h = this;
  2065    _in_cset_fast_test_length = max_regions();
  2066    _in_cset_fast_test_base =
  2067                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
  2069    // We're biasing _in_cset_fast_test to avoid subtracting the
  2070    // beginning of the heap every time we want to index; basically
  2071    // it's the same as what we do with the card table.
  2072    _in_cset_fast_test = _in_cset_fast_test_base -
  2073                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  2075    // Clear the _cset_fast_test bitmap in anticipation of adding
  2076    // regions to the incremental collection set for the first
  2077    // evacuation pause.
  2078    clear_cset_fast_test();
  2080   // Create the ConcurrentMark data structure and thread.
  2081   // (Must do this late, so that "max_regions" is defined.)
  2082   _cm       = new ConcurrentMark(heap_rs, max_regions());
  2083   _cmThread = _cm->cmThread();
  2085   // Initialize the from_card cache structure of HeapRegionRemSet.
  2086   HeapRegionRemSet::init_heap(max_regions());
  2088   // Now expand into the initial heap size.
  2089   if (!expand(init_byte_size)) {
  2090     vm_exit_during_initialization("Failed to allocate initial heap.");
  2091     return JNI_ENOMEM;
  2094   // Perform any initialization actions delegated to the policy.
  2095   g1_policy()->init();
  2097   _refine_cte_cl =
  2098     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  2099                                     g1_rem_set(),
  2100                                     concurrent_g1_refine());
  2101   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  2103   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  2104                                                SATB_Q_FL_lock,
  2105                                                G1SATBProcessCompletedThreshold,
  2106                                                Shared_SATB_Q_lock);
  2108   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2109                                                 DirtyCardQ_FL_lock,
  2110                                                 concurrent_g1_refine()->yellow_zone(),
  2111                                                 concurrent_g1_refine()->red_zone(),
  2112                                                 Shared_DirtyCardQ_lock);
  2114   if (G1DeferredRSUpdate) {
  2115     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2116                                       DirtyCardQ_FL_lock,
  2117                                       -1, // never trigger processing
  2118                                       -1, // no limit on length
  2119                                       Shared_DirtyCardQ_lock,
  2120                                       &JavaThread::dirty_card_queue_set());
  2123   // Initialize the card queue set used to hold cards containing
  2124   // references into the collection set.
  2125   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
  2126                                              DirtyCardQ_FL_lock,
  2127                                              -1, // never trigger processing
  2128                                              -1, // no limit on length
  2129                                              Shared_DirtyCardQ_lock,
  2130                                              &JavaThread::dirty_card_queue_set());
  2132   // In case we're keeping closure specialization stats, initialize those
  2133   // counts and that mechanism.
  2134   SpecializationStats::clear();
  2136   // Do later initialization work for concurrent refinement.
  2137   _cg1r->init();
  2139   // Here we allocate the dummy full region that is required by the
  2140   // G1AllocRegion class. If we don't pass an address in the reserved
  2141   // space here, lots of asserts fire.
  2143   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
  2144                                              _g1_reserved.start());
  2145   // We'll re-use the same region whether the alloc region will
  2146   // require BOT updates or not and, if it doesn't, then a non-young
  2147   // region will complain that it cannot support allocations without
  2148   // BOT updates. So we'll tag the dummy region as young to avoid that.
  2149   dummy_region->set_young();
  2150   // Make sure it's full.
  2151   dummy_region->set_top(dummy_region->end());
  2152   G1AllocRegion::setup(this, dummy_region);
  2154   init_mutator_alloc_region();
  2156   // Create the monitoring and management support now that
  2157   // values in the heap have been properly initialized.
  2158   _g1mm = new G1MonitoringSupport(this);
  2160   return JNI_OK;
  2163 void G1CollectedHeap::ref_processing_init() {
  2164   // Reference processing in G1 currently works as follows:
  2165   //
  2166   // * There are two reference processor instances. One is
  2167   //   used to record and process discovered references
  2168   //   during concurrent marking; the other is used to
  2169   //   record and process references during STW pauses
  2170   //   (both full and incremental).
  2171   // * Both ref processors need to 'span' the entire heap as
  2172   //   the regions in the collection set may be dotted around.
  2173   //
  2174   // * For the concurrent marking ref processor:
  2175   //   * Reference discovery is enabled at initial marking.
  2176   //   * Reference discovery is disabled and the discovered
  2177   //     references processed etc during remarking.
  2178   //   * Reference discovery is MT (see below).
  2179   //   * Reference discovery requires a barrier (see below).
  2180   //   * Reference processing may or may not be MT
  2181   //     (depending on the value of ParallelRefProcEnabled
  2182   //     and ParallelGCThreads).
  2183   //   * A full GC disables reference discovery by the CM
  2184   //     ref processor and abandons any entries on its
  2185   //     discovered lists.
  2186   //
  2187   // * For the STW processor:
  2188   //   * Non MT discovery is enabled at the start of a full GC.
  2189   //   * Processing and enqueueing during a full GC is non-MT.
  2190   //   * During a full GC, references are processed after marking.
  2191   //
  2192   //   * Discovery (may or may not be MT) is enabled at the start
  2193   //     of an incremental evacuation pause.
  2194   //   * References are processed near the end of a STW evacuation pause.
  2195   //   * For both types of GC:
  2196   //     * Discovery is atomic - i.e. not concurrent.
  2197   //     * Reference discovery will not need a barrier.
  2199   SharedHeap::ref_processing_init();
  2200   MemRegion mr = reserved_region();
  2202   // Concurrent Mark ref processor
  2203   _ref_processor_cm =
  2204     new ReferenceProcessor(mr,    // span
  2205                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2206                                 // mt processing
  2207                            (int) ParallelGCThreads,
  2208                                 // degree of mt processing
  2209                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
  2210                                 // mt discovery
  2211                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
  2212                                 // degree of mt discovery
  2213                            false,
  2214                                 // Reference discovery is not atomic
  2215                            &_is_alive_closure_cm,
  2216                                 // is alive closure
  2217                                 // (for efficiency/performance)
  2218                            true);
  2219                                 // Setting next fields of discovered
  2220                                 // lists requires a barrier.
  2222   // STW ref processor
  2223   _ref_processor_stw =
  2224     new ReferenceProcessor(mr,    // span
  2225                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2226                                 // mt processing
  2227                            MAX2((int)ParallelGCThreads, 1),
  2228                                 // degree of mt processing
  2229                            (ParallelGCThreads > 1),
  2230                                 // mt discovery
  2231                            MAX2((int)ParallelGCThreads, 1),
  2232                                 // degree of mt discovery
  2233                            true,
  2234                                 // Reference discovery is atomic
  2235                            &_is_alive_closure_stw,
  2236                                 // is alive closure
  2237                                 // (for efficiency/performance)
  2238                            false);
  2239                                 // Setting next fields of discovered
  2240                                 // lists requires a barrier.
  2243 size_t G1CollectedHeap::capacity() const {
  2244   return _g1_committed.byte_size();
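       // Resets the GC time stamp of the given region and, for a "starts
       // humongous" region, of all of its "continues humongous" regions
       // as well.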
  2247 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  2248   assert(!hr->continuesHumongous(), "pre-condition");
  2249   hr->reset_gc_time_stamp();
  2250   if (hr->startsHumongous()) {
  2251     uint first_index = hr->hrs_index() + 1;
  2252     uint last_index = hr->last_hc_index();
  2253     for (uint i = first_index; i < last_index; i += 1) {
  2254       HeapRegion* chr = region_at(i);
  2255       assert(chr->continuesHumongous(), "sanity");
  2256       chr->reset_gc_time_stamp();
  2261 #ifndef PRODUCT
  2262 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
  2263 private:
  2264   unsigned _gc_time_stamp;
  2265   bool _failures;
  2267 public:
  2268   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
  2269     _gc_time_stamp(gc_time_stamp), _failures(false) { }
  2271   virtual bool doHeapRegion(HeapRegion* hr) {
  2272     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
  2273     if (_gc_time_stamp != region_gc_time_stamp) {
  2274       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
  2275                              "expected %d", HR_FORMAT_PARAMS(hr),
  2276                              region_gc_time_stamp, _gc_time_stamp);
  2277       _failures = true;
  2279     return false;
  2282   bool failures() { return _failures; }
  2283 };
  2285 void G1CollectedHeap::check_gc_time_stamps() {
  2286   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  2287   heap_region_iterate(&cl);
  2288   guarantee(!cl.failures(), "all GC time stamps should have been reset");
  2290 #endif // PRODUCT
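       // Cleans the cards in the hot card cache and then applies the given
       // closure to all completed buffers in the global dirty card queue
       // set, recording per worker the number of buffers processed.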
  2292 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2293                                                  DirtyCardQueue* into_cset_dcq,
  2294                                                  bool concurrent,
  2295                                                  int worker_i) {
  2296   // Clean cards in the hot card cache
  2297   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
  2299   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2300   int n_completed_buffers = 0;
  2301   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2302     n_completed_buffers++;
  2304   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  2305   dcqs.clear_n_completed_buffers();
  2306   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2310 // Computes the sum of the storage used by the various regions.
  2312 size_t G1CollectedHeap::used() const {
  2313   assert(Heap_lock->owner() != NULL,
  2314          "Should be owned on this thread's behalf.");
  2315   size_t result = _summary_bytes_used;
  2316   // Read only once in case it is set to NULL concurrently
  2317   HeapRegion* hr = _mutator_alloc_region.get();
  2318   if (hr != NULL)
  2319     result += hr->used();
  2320   return result;
  2323 size_t G1CollectedHeap::used_unlocked() const {
  2324   size_t result = _summary_bytes_used;
  2325   return result;
  2328 class SumUsedClosure: public HeapRegionClosure {
  2329   size_t _used;
  2330 public:
  2331   SumUsedClosure() : _used(0) {}
  2332   bool doHeapRegion(HeapRegion* r) {
  2333     if (!r->continuesHumongous()) {
  2334       _used += r->used();
  2336     return false;
  2338   size_t result() { return _used; }
  2339 };
  2341 size_t G1CollectedHeap::recalculate_used() const {
  2342   SumUsedClosure blk;
  2343   heap_region_iterate(&blk);
  2344   return blk.result();
  2347 size_t G1CollectedHeap::unsafe_max_alloc() {
  2348   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2349   // otherwise, is there space in the current allocation region?
  2351   // We need to store the current allocation region in a local variable
  2352   // here. The problem is that this method doesn't take any locks and
  2353   // there may be other threads which overwrite the current allocation
  2354   // region field. attempt_allocation(), for example, sets it to NULL
  2355   // and this can happen *after* the NULL check here but before the call
  2356   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2357   // to be a problem in the optimized build, since the two loads of the
  2358   // current allocation region field are optimized away.
  2359   HeapRegion* hr = _mutator_alloc_region.get();
  2360   if (hr == NULL) {
  2361     return 0;
  2363   return hr->free();
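       // Returns whether a collection for the given cause should be handled
       // by starting a concurrent marking cycle (via an initial-mark
       // evacuation pause) rather than by a stop-the-world full collection.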
  2366 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  2367   switch (cause) {
  2368     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
  2369     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  2370     case GCCause::_g1_humongous_allocation: return true;
  2371     default:                                return false;
  2375 #ifndef PRODUCT
  2376 void G1CollectedHeap::allocate_dummy_regions() {
  2377   // Let's fill up most of the region
  2378   size_t word_size = HeapRegion::GrainWords - 1024;
  2379   // And as a result the region we'll allocate will be humongous.
  2380   guarantee(isHumongous(word_size), "sanity");
  2382   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
  2383     // Let's use the existing mechanism for the allocation
  2384     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
  2385     if (dummy_obj != NULL) {
  2386       MemRegion mr(dummy_obj, word_size);
  2387       CollectedHeap::fill_with_object(mr);
  2388     } else {
  2389       // If we can't allocate once, we probably cannot allocate
  2390       // again. Let's get out of the loop.
  2391       break;
  2395 #endif // !PRODUCT
  2397 void G1CollectedHeap::increment_old_marking_cycles_started() {
  2398   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
  2399     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
  2400     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
  2401     _old_marking_cycles_started, _old_marking_cycles_completed));
  2403   _old_marking_cycles_started++;
  2406 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  2407   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2409   // We assume that if concurrent == true, then the caller is a
  2410   // concurrent thread that has joined the Suspendible Thread
  2411   // Set. If there's ever a cheap way to check this, we should add an
  2412   // assert here.
  2414   // Given that this method is called at the end of a Full GC or of a
  2415   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2416   // interrupt a concurrent cycle), the number of full collections
  2417   // completed should be either one (in the case where there was no
  2418   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2419   // behind the number of full collections started.
  2421   // This is the case for the inner caller, i.e. a Full GC.
  2422   assert(concurrent ||
  2423          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
  2424          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
  2425          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
  2426                  "is inconsistent with _old_marking_cycles_completed = %u",
  2427                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2429   // This is the case for the outer caller, i.e. the concurrent cycle.
  2430   assert(!concurrent ||
  2431          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
  2432          err_msg("for outer caller (concurrent cycle): "
  2433                  "_old_marking_cycles_started = %u "
  2434                  "is inconsistent with _old_marking_cycles_completed = %u",
  2435                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2437   _old_marking_cycles_completed += 1;
  2439   // We need to clear the "in_progress" flag in the CM thread before
  2440   // we wake up any waiters (especially when ExplicitInvokesConcurrent
  2441   // is set) so that if a waiter requests another System.gc() it doesn't
  2442   // incorrectly see that a marking cycle is still in progress.
  2443   if (concurrent) {
  2444     _cmThread->clear_in_progress();
  2447   // This notify_all() will ensure that a thread that called
  2448   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
  2449   // and is waiting for a full GC to finish will be woken up. It is
  2450   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2451   FullGCCount_lock->notify_all();
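       // Entry point for externally requested collections (e.g. System.gc()
       // or a GC locker induced GC). Depending on the cause this schedules
       // either an initial-mark evacuation pause that starts a concurrent
       // cycle, a regular evacuation pause, or a Full GC, retrying the
       // pause if it could not be performed.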
  2454 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2455   assert_heap_not_locked();
  2457   unsigned int gc_count_before;
  2458   unsigned int old_marking_count_before;
  2459   bool retry_gc;
  2461   do {
  2462     retry_gc = false;
  2465       MutexLocker ml(Heap_lock);
  2467       // Read the GC count while holding the Heap_lock
  2468       gc_count_before = total_collections();
  2469       old_marking_count_before = _old_marking_cycles_started;
  2472     if (should_do_concurrent_full_gc(cause)) {
  2473       // Schedule an initial-mark evacuation pause that will start a
  2474       // concurrent cycle. We're setting word_size to 0 which means that
  2475       // we are not requesting a post-GC allocation.
  2476       VM_G1IncCollectionPause op(gc_count_before,
  2477                                  0,     /* word_size */
  2478                                  true,  /* should_initiate_conc_mark */
  2479                                  g1_policy()->max_pause_time_ms(),
  2480                                  cause);
  2482       VMThread::execute(&op);
  2483       if (!op.pause_succeeded()) {
  2484         if (old_marking_count_before == _old_marking_cycles_started) {
  2485           retry_gc = op.should_retry_gc();
  2486         } else {
  2487           // A Full GC happened while we were trying to schedule the
  2488           // initial-mark GC. No point in starting a new cycle given
  2489           // that the whole heap was collected anyway.
  2492         if (retry_gc) {
  2493           if (GC_locker::is_active_and_needs_gc()) {
  2494             GC_locker::stall_until_clear();
  2498     } else {
  2499       if (cause == GCCause::_gc_locker
  2500           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2502         // Schedule a standard evacuation pause. We're setting word_size
  2503         // to 0 which means that we are not requesting a post-GC allocation.
  2504         VM_G1IncCollectionPause op(gc_count_before,
  2505                                    0,     /* word_size */
  2506                                    false, /* should_initiate_conc_mark */
  2507                                    g1_policy()->max_pause_time_ms(),
  2508                                    cause);
  2509         VMThread::execute(&op);
  2510       } else {
  2511         // Schedule a Full GC.
  2512         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
  2513         VMThread::execute(&op);
  2516   } while (retry_gc);
  2519 bool G1CollectedHeap::is_in(const void* p) const {
  2520   if (_g1_committed.contains(p)) {
  2521     // Given that we know that p is in the committed space,
  2522     // heap_region_containing_raw() should successfully
  2523     // return the containing region.
  2524     HeapRegion* hr = heap_region_containing_raw(p);
  2525     return hr->is_in(p);
  2526   } else {
  2527     return false;
  2531 // Iteration functions.
  2533 // Iterates an OopClosure over all ref-containing fields of objects
  2534 // within a HeapRegion.
  2536 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2537   MemRegion _mr;
  2538   ExtendedOopClosure* _cl;
  2539 public:
  2540   IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
  2541     : _mr(mr), _cl(cl) {}
  2542   bool doHeapRegion(HeapRegion* r) {
  2543     if (!r->continuesHumongous()) {
  2544       r->oop_iterate(_cl);
  2546     return false;
  2548 };
  2550 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  2551   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2552   heap_region_iterate(&blk);
  2555 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  2556   IterateOopClosureRegionClosure blk(mr, cl);
  2557   heap_region_iterate(&blk);
  2560 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2562 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2563   ObjectClosure* _cl;
  2564 public:
  2565   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2566   bool doHeapRegion(HeapRegion* r) {
  2567     if (! r->continuesHumongous()) {
  2568       r->object_iterate(_cl);
  2570     return false;
  2572 };
  2574 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  2575   IterateObjectClosureRegionClosure blk(cl);
  2576   heap_region_iterate(&blk);
  2579 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  2580   // FIXME: is this right?
  2581   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  2584 // Calls a SpaceClosure on a HeapRegion.
  2586 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2587   SpaceClosure* _cl;
  2588 public:
  2589   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2590   bool doHeapRegion(HeapRegion* r) {
  2591     _cl->do_space(r);
  2592     return false;
  2594 };
  2596 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2597   SpaceClosureRegionClosure blk(cl);
  2598   heap_region_iterate(&blk);
  2601 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  2602   _hrs.iterate(cl);
  2605 void
  2606 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2607                                                  uint worker_id,
  2608                                                  uint no_of_par_workers,
  2609                                                  jint claim_value) {
  2610   const uint regions = n_regions();
  2611   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  2612                              no_of_par_workers :
  2613                              1);
  2614   assert(UseDynamicNumberOfGCThreads ||
  2615          no_of_par_workers == workers()->total_workers(),
  2616          "Non dynamic should use fixed number of workers");
  2617   // try to spread out the starting points of the workers
  2618   const HeapRegion* start_hr =
  2619                         start_region_for_worker(worker_id, no_of_par_workers);
  2620   const uint start_index = start_hr->hrs_index();
  2622   // each worker will actually look at all regions
  2623   for (uint count = 0; count < regions; ++count) {
  2624     const uint index = (start_index + count) % regions;
  2625     assert(0 <= index && index < regions, "sanity");
  2626     HeapRegion* r = region_at(index);
  2627     // we'll ignore "continues humongous" regions (we'll process them
  2628     // when we come across their corresponding "start humongous"
  2629     // region) and regions already claimed
  2630     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2631       continue;
  2633     // OK, try to claim it
  2634     if (r->claimHeapRegion(claim_value)) {
  2635       // success!
  2636       assert(!r->continuesHumongous(), "sanity");
  2637       if (r->startsHumongous()) {
  2638         // If the region is "starts humongous" we'll iterate over its
  2639         // "continues humongous" regions first; in fact we'll do them
  2640         // first. The order is important. In one case, calling the
  2641         // closure on the "starts humongous" region might de-allocate
  2642         // and clear all its "continues humongous" regions and, as a
  2643         // result, we might end up processing them twice. So, we'll do
  2644         // them first (notice: most closures will ignore them anyway) and
  2645         // then we'll do the "starts humongous" region.
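        // For example (hypothetical layout): if region 10 is "starts
        // humongous" and regions 11-13 "continue" it, the loop below
        // applies the closure to 11, 12 and 13 before region 10 itself.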
  2646         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  2647           HeapRegion* chr = region_at(ch_index);
  2649           // if the region has already been claimed or it's not
  2650           // "continues humongous" we're done
  2651           if (chr->claim_value() == claim_value ||
  2652               !chr->continuesHumongous()) {
  2653             break;
  2656           // No one should have claimed it directly. We can assert this
  2657           // given that we claimed its "starts humongous" region.
  2658           assert(chr->claim_value() != claim_value, "sanity");
  2659           assert(chr->humongous_start_region() == r, "sanity");
  2661           if (chr->claimHeapRegion(claim_value)) {
  2662             // we should always be able to claim it; no one else should
  2663             // be trying to claim this region
  2665             bool res2 = cl->doHeapRegion(chr);
  2666             assert(!res2, "Should not abort");
  2668             // Right now, this holds (i.e., no closure that actually
  2669             // does something with "continues humongous" regions
  2670             // clears them). We might have to weaken it in the future,
  2671             // but let's leave these two asserts here for extra safety.
  2672             assert(chr->continuesHumongous(), "should still be the case");
  2673             assert(chr->humongous_start_region() == r, "sanity");
  2674           } else {
  2675             guarantee(false, "we should not reach here");
  2680       assert(!r->continuesHumongous(), "sanity");
  2681       bool res = cl->doHeapRegion(r);
  2682       assert(!res, "Should not abort");
  2687 class ResetClaimValuesClosure: public HeapRegionClosure {
  2688 public:
  2689   bool doHeapRegion(HeapRegion* r) {
  2690     r->set_claim_value(HeapRegion::InitialClaimValue);
  2691     return false;
  2693 };
  2695 void G1CollectedHeap::reset_heap_region_claim_values() {
  2696   ResetClaimValuesClosure blk;
  2697   heap_region_iterate(&blk);
  2700 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  2701   ResetClaimValuesClosure blk;
  2702   collection_set_iterate(&blk);
  2705 #ifdef ASSERT
  2706 // This checks whether all regions in the heap have the correct claim
  2707 // value. It also piggy-backs a check that the
  2708 // humongous_start_region() information on "continues humongous"
  2709 // regions is correct.
  2711 class CheckClaimValuesClosure : public HeapRegionClosure {
  2712 private:
  2713   jint _claim_value;
  2714   uint _failures;
  2715   HeapRegion* _sh_region;
  2717 public:
  2718   CheckClaimValuesClosure(jint claim_value) :
  2719     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2720   bool doHeapRegion(HeapRegion* r) {
  2721     if (r->claim_value() != _claim_value) {
  2722       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2723                              "claim value = %d, should be %d",
  2724                              HR_FORMAT_PARAMS(r),
  2725                              r->claim_value(), _claim_value);
  2726       ++_failures;
  2728     if (!r->isHumongous()) {
  2729       _sh_region = NULL;
  2730     } else if (r->startsHumongous()) {
  2731       _sh_region = r;
  2732     } else if (r->continuesHumongous()) {
  2733       if (r->humongous_start_region() != _sh_region) {
  2734         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2735                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2736                                HR_FORMAT_PARAMS(r),
  2737                                r->humongous_start_region(),
  2738                                _sh_region);
  2739         ++_failures;
  2742     return false;
  2744   uint failures() { return _failures; }
  2745 };
  2747 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2748   CheckClaimValuesClosure cl(claim_value);
  2749   heap_region_iterate(&cl);
  2750   return cl.failures() == 0;
  2753 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
  2754 private:
  2755   jint _claim_value;
  2756   uint _failures;
  2758 public:
  2759   CheckClaimValuesInCSetHRClosure(jint claim_value) :
  2760     _claim_value(claim_value), _failures(0) { }
  2762   uint failures() { return _failures; }
  2764   bool doHeapRegion(HeapRegion* hr) {
  2765     assert(hr->in_collection_set(), "how?");
  2766     assert(!hr->isHumongous(), "H-region in CSet");
  2767     if (hr->claim_value() != _claim_value) {
  2768       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
  2769                              "claim value = %d, should be %d",
  2770                              HR_FORMAT_PARAMS(hr),
  2771                              hr->claim_value(), _claim_value);
  2772       _failures += 1;
  2774     return false;
  2776 };
  2778 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
  2779   CheckClaimValuesInCSetHRClosure cl(claim_value);
  2780   collection_set_iterate(&cl);
  2781   return cl.failures() == 0;
  2783 #endif // ASSERT
  2785 // Clear the cached CSet starting regions and (more importantly)
  2786 // the time stamps. Called when we reset the GC time stamp.
  2787 void G1CollectedHeap::clear_cset_start_regions() {
  2788   assert(_worker_cset_start_region != NULL, "sanity");
  2789   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
  2791   int n_queues = MAX2((int)ParallelGCThreads, 1);
  2792   for (int i = 0; i < n_queues; i++) {
  2793     _worker_cset_start_region[i] = NULL;
  2794     _worker_cset_start_region_time_stamp[i] = 0;
  2798 // Given the id of a worker, obtain or calculate a suitable
  2799 // starting region for iterating over the current collection set.
  2800 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
  2801   assert(get_gc_time_stamp() > 0, "should have been updated by now");
  2803   HeapRegion* result = NULL;
  2804   unsigned gc_time_stamp = get_gc_time_stamp();
  2806   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
  2807     // Cached starting region for current worker was set
  2808     // during the current pause - so it's valid.
  2809     // Note: the cached starting heap region may be NULL
  2810     // (when the collection set is empty).
  2811     result = _worker_cset_start_region[worker_i];
  2812     assert(result == NULL || result->in_collection_set(), "sanity");
  2813     return result;
  2816   // The cached entry was not valid so let's calculate
  2817   // a suitable starting heap region for this worker.
  2819   // We want the parallel threads to start their collection
  2820   // set iteration at different collection set regions to
  2821   // avoid contention.
  2822   // If we have:
  2823   //          n collection set regions
  2824   //          p threads
  2825   // Then thread t will start at region floor ((t * n) / p)
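  // For example (illustrative numbers): with n == 10 CSet regions and
  // p == 4 threads, threads 0..3 start at regions 0, 2, 5 and 7,
  // i.e. floor(0/4), floor(10/4), floor(20/4), floor(30/4).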
  2827   result = g1_policy()->collection_set();
  2828   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2829     uint cs_size = g1_policy()->cset_region_length();
  2830     uint active_workers = workers()->active_workers();
  2831     assert(UseDynamicNumberOfGCThreads ||
  2832              active_workers == workers()->total_workers(),
  2833              "Unless dynamic should use total workers");
  2835     uint end_ind   = (cs_size * worker_i) / active_workers;
  2836     uint start_ind = 0;
  2838     if (worker_i > 0 &&
  2839         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
  2840       // The previous worker's starting region is valid
  2841       // so let's iterate from there
  2842       start_ind = (cs_size * (worker_i - 1)) / active_workers;
  2843       result = _worker_cset_start_region[worker_i - 1];
  2846     for (uint i = start_ind; i < end_ind; i++) {
  2847       result = result->next_in_collection_set();
  2851   // Note: the calculated starting heap region may be NULL
  2852   // (when the collection set is empty).
  2853   assert(result == NULL || result->in_collection_set(), "sanity");
  2854   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
  2855          "should be updated only once per pause");
  2856   _worker_cset_start_region[worker_i] = result;
  2857   OrderAccess::storestore();
  2858   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
  2859   return result;
  2862 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
  2863                                                      uint no_of_par_workers) {
  2864   uint worker_num =
  2865            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
  2866   assert(UseDynamicNumberOfGCThreads ||
  2867          no_of_par_workers == workers()->total_workers(),
  2868          "Non dynamic should use fixed number of workers");
  2869   const uint start_index = n_regions() * worker_i / worker_num;
  2870   return region_at(start_index);
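// For example (illustrative numbers): with n_regions() == 100 and 4
// parallel workers, workers 0..3 start their sweep at regions 0, 25,
// 50 and 75 respectively.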
  2873 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2874   HeapRegion* r = g1_policy()->collection_set();
  2875   while (r != NULL) {
  2876     HeapRegion* next = r->next_in_collection_set();
  2877     if (cl->doHeapRegion(r)) {
  2878       cl->incomplete();
  2879       return;
  2881     r = next;
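// Iterates over the collection set starting at region r: first from r to
// the end of the CSet list, then wrapping around from the head of the
// list up to (but not including) r.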
  2885 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2886                                                   HeapRegionClosure *cl) {
  2887   if (r == NULL) {
  2888     // The CSet is empty so there's nothing to do.
  2889     return;
  2892   assert(r->in_collection_set(),
  2893          "Start region must be a member of the collection set.");
  2894   HeapRegion* cur = r;
  2895   while (cur != NULL) {
  2896     HeapRegion* next = cur->next_in_collection_set();
  2897     if (cl->doHeapRegion(cur) && false) {
  2898       cl->incomplete();
  2899       return;
  2901     cur = next;
  2903   cur = g1_policy()->collection_set();
  2904   while (cur != r) {
  2905     HeapRegion* next = cur->next_in_collection_set();
  2906     if (cl->doHeapRegion(cur) && false) {
  2907       cl->incomplete();
  2908       return;
  2910     cur = next;
  2914 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2915   return n_regions() > 0 ? region_at(0) : NULL;
  2919 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2920   Space* res = heap_region_containing(addr);
  2921   return res;
  2924 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2925   Space* sp = space_containing(addr);
  2926   if (sp != NULL) {
  2927     return sp->block_start(addr);
  2929   return NULL;
  2932 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2933   Space* sp = space_containing(addr);
  2934   assert(sp != NULL, "block_size of address outside of heap");
  2935   return sp->block_size(addr);
  2938 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2939   Space* sp = space_containing(addr);
  2940   return sp->block_is_obj(addr);
  2943 bool G1CollectedHeap::supports_tlab_allocation() const {
  2944   return true;
  2947 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2948   return HeapRegion::GrainBytes;
  2951 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2952   // Return the remaining space in the current alloc region, but not less than
  2953   // the min TLAB size.
  2955   // Also, this value can be at most the humongous object threshold,
  2956   // since we can't allow TLABs to grow big enough to accommodate
  2957   // humongous objects.
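  // For example (illustrative, assuming 1M regions and the usual
  // half-region humongous threshold): TLABs are capped at ~512K here;
  // with a current alloc region that has 300K free this returns 300K,
  // and never less than MinTLABSize.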
  2959   HeapRegion* hr = _mutator_alloc_region.get();
  2960   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  2961   if (hr == NULL) {
  2962     return max_tlab_size;
  2963   } else {
  2964     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
  2968 size_t G1CollectedHeap::max_capacity() const {
  2969   return _g1_reserved.byte_size();
  2972 jlong G1CollectedHeap::millis_since_last_gc() {
  2973   // assert(false, "NYI");
  2974   return 0;
  2977 void G1CollectedHeap::prepare_for_verify() {
  2978   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2979     ensure_parsability(false);
  2981   g1_rem_set()->prepare_for_verify();
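// The helpers below interpret liveness relative to a VerifyOption:
// VerifyOption_G1UsePrevMarking uses the "prev" marking information,
// VerifyOption_G1UseNextMarking uses the "next" marking information, and
// VerifyOption_G1UseMarkWord consults the mark word in the object header.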
  2984 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
  2985                                               VerifyOption vo) {
  2986   switch (vo) {
  2987   case VerifyOption_G1UsePrevMarking:
  2988     return hr->obj_allocated_since_prev_marking(obj);
  2989   case VerifyOption_G1UseNextMarking:
  2990     return hr->obj_allocated_since_next_marking(obj);
  2991   case VerifyOption_G1UseMarkWord:
  2992     return false;
  2993   default:
  2994     ShouldNotReachHere();
  2996   return false; // keep some compilers happy
  2999 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
  3000   switch (vo) {
  3001   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
  3002   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
  3003   case VerifyOption_G1UseMarkWord:    return NULL;
  3004   default:                            ShouldNotReachHere();
  3006   return NULL; // keep some compilers happy
  3009 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
  3010   switch (vo) {
  3011   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
  3012   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
  3013   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
  3014   default:                            ShouldNotReachHere();
  3016   return false; // keep some compilers happy
  3019 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
  3020   switch (vo) {
  3021   case VerifyOption_G1UsePrevMarking: return "PTAMS";
  3022   case VerifyOption_G1UseNextMarking: return "NTAMS";
  3023   case VerifyOption_G1UseMarkWord:    return "NONE";
  3024   default:                            ShouldNotReachHere();
  3026   return NULL; // keep some compilers happy
  3029 class VerifyLivenessOopClosure: public OopClosure {
  3030   G1CollectedHeap* _g1h;
  3031   VerifyOption _vo;
  3032 public:
  3033   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
  3034     _g1h(g1h), _vo(vo)
  3035   { }
  3036   void do_oop(narrowOop *p) { do_oop_work(p); }
  3037   void do_oop(      oop *p) { do_oop_work(p); }
  3039   template <class T> void do_oop_work(T *p) {
  3040     oop obj = oopDesc::load_decode_heap_oop(p);
  3041     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
  3042               "Dead object referenced by a not dead object");
  3044 };
  3046 class VerifyObjsInRegionClosure: public ObjectClosure {
  3047 private:
  3048   G1CollectedHeap* _g1h;
  3049   size_t _live_bytes;
  3050   HeapRegion *_hr;
  3051   VerifyOption _vo;
  3052 public:
  3053   // _vo == UsePrevMarking -> use "prev" marking information,
  3054   // _vo == UseNextMarking -> use "next" marking information,
  3055   // _vo == UseMarkWord    -> use mark word from object header.
  3056   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
  3057     : _live_bytes(0), _hr(hr), _vo(vo) {
  3058     _g1h = G1CollectedHeap::heap();
  3060   void do_object(oop o) {
  3061     VerifyLivenessOopClosure isLive(_g1h, _vo);
  3062     assert(o != NULL, "Huh?");
  3063     if (!_g1h->is_obj_dead_cond(o, _vo)) {
  3064       // If the object is alive according to the mark word,
  3065       // then verify that the marking information agrees.
  3066       // Note we can't verify the contrapositive of the
  3067       // above: if the object is dead (according to the mark
  3068       // word), it may not be marked, or may have been marked
  3069       // but has since become dead, or may have been allocated
  3070       // since the last marking.
  3071       if (_vo == VerifyOption_G1UseMarkWord) {
  3072         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
  3075       o->oop_iterate_no_header(&isLive);
  3076       if (!_hr->obj_allocated_since_prev_marking(o)) {
  3077         size_t obj_size = o->size();    // Make sure we don't overflow
  3078         _live_bytes += (obj_size * HeapWordSize);
  3082   size_t live_bytes() { return _live_bytes; }
  3083 };
  3085 class PrintObjsInRegionClosure : public ObjectClosure {
  3086   HeapRegion *_hr;
  3087   G1CollectedHeap *_g1;
  3088 public:
  3089   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  3090     _g1 = G1CollectedHeap::heap();
  3091   };
  3093   void do_object(oop o) {
  3094     if (o != NULL) {
  3095       HeapWord *start = (HeapWord *) o;
  3096       size_t word_sz = o->size();
  3097       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  3098                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  3099                           (void*) o, word_sz,
  3100                           _g1->isMarkedPrev(o),
  3101                           _g1->isMarkedNext(o),
  3102                           _hr->obj_allocated_since_prev_marking(o));
  3103       HeapWord *end = start + word_sz;
  3104       HeapWord *cur;
  3105       int *val;
  3106       for (cur = start; cur < end; cur++) {
  3107         val = (int *) cur;
  3108         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  3112 };
  3114 class VerifyRegionClosure: public HeapRegionClosure {
  3115 private:
  3116   bool             _par;
  3117   VerifyOption     _vo;
  3118   bool             _failures;
  3119 public:
  3120   // _vo == UsePrevMarking -> use "prev" marking information,
  3121   // _vo == UseNextMarking -> use "next" marking information,
  3122   // _vo == UseMarkWord    -> use mark word from object header.
  3123   VerifyRegionClosure(bool par, VerifyOption vo)
  3124     : _par(par),
  3125       _vo(vo),
  3126       _failures(false) {}
  3128   bool failures() {
  3129     return _failures;
  3132   bool doHeapRegion(HeapRegion* r) {
  3133     if (!r->continuesHumongous()) {
  3134       bool failures = false;
  3135       r->verify(_vo, &failures);
  3136       if (failures) {
  3137         _failures = true;
  3138       } else {
  3139         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
  3140         r->object_iterate(&not_dead_yet_cl);
  3141         if (_vo != VerifyOption_G1UseNextMarking) {
  3142           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  3143             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  3144                                    "max_live_bytes "SIZE_FORMAT" "
  3145                                    "< calculated "SIZE_FORMAT,
  3146                                    r->bottom(), r->end(),
  3147                                    r->max_live_bytes(),
  3148                                  not_dead_yet_cl.live_bytes());
  3149             _failures = true;
  3151         } else {
  3152           // When vo == UseNextMarking we cannot currently do a sanity
  3153           // check on the live bytes as the calculation has not been
  3154           // finalized yet.
  3158     return false; // keep iterating even after a failure; callers check failures()
  3160 };
  3162 class YoungRefCounterClosure : public OopClosure {
  3163   G1CollectedHeap* _g1h;
  3164   int              _count;
  3165  public:
  3166   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  3167   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  3168   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  3170   int count() { return _count; }
  3171   void reset_count() { _count = 0; };
  3172 };
  3174 class VerifyKlassClosure: public KlassClosure {
  3175   YoungRefCounterClosure _young_ref_counter_closure;
  3176   OopClosure *_oop_closure;
  3177  public:
  3178   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  3179   void do_klass(Klass* k) {
  3180     k->oops_do(_oop_closure);
  3182     _young_ref_counter_closure.reset_count();
  3183     k->oops_do(&_young_ref_counter_closure);
  3184     if (_young_ref_counter_closure.count() > 0) {
  3185       guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
  3188 };
  3190 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
  3191 //       pass it as the perm_blk to SharedHeap::process_strong_roots.
  3192 //       When process_strong_roots stops calling perm_blk->younger_refs_iterate
  3193 //       we can change this closure to extend the simpler OopClosure.
  3194 class VerifyRootsClosure: public OopsInGenClosure {
  3195 private:
  3196   G1CollectedHeap* _g1h;
  3197   VerifyOption     _vo;
  3198   bool             _failures;
  3199 public:
  3200   // _vo == UsePrevMarking -> use "prev" marking information,
  3201   // _vo == UseNextMarking -> use "next" marking information,
  3202   // _vo == UseMarkWord    -> use mark word from object header.
  3203   VerifyRootsClosure(VerifyOption vo) :
  3204     _g1h(G1CollectedHeap::heap()),
  3205     _vo(vo),
  3206     _failures(false) { }
  3208   bool failures() { return _failures; }
  3210   template <class T> void do_oop_nv(T* p) {
  3211     T heap_oop = oopDesc::load_heap_oop(p);
  3212     if (!oopDesc::is_null(heap_oop)) {
  3213       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  3214       if (_g1h->is_obj_dead_cond(obj, _vo)) {
  3215         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  3216                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  3217         if (_vo == VerifyOption_G1UseMarkWord) {
  3218           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
  3220         obj->print_on(gclog_or_tty);
  3221         _failures = true;
  3226   void do_oop(oop* p)       { do_oop_nv(p); }
  3227   void do_oop(narrowOop* p) { do_oop_nv(p); }
  3228 };
  3230 // This is the task used for parallel heap verification.
  3232 class G1ParVerifyTask: public AbstractGangTask {
  3233 private:
  3234   G1CollectedHeap* _g1h;
  3235   VerifyOption     _vo;
  3236   bool             _failures;
  3238 public:
  3239   // _vo == UsePrevMarking -> use "prev" marking information,
  3240   // _vo == UseNextMarking -> use "next" marking information,
  3241   // _vo == UseMarkWord    -> use mark word from object header.
  3242   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
  3243     AbstractGangTask("Parallel verify task"),
  3244     _g1h(g1h),
  3245     _vo(vo),
  3246     _failures(false) { }
  3248   bool failures() {
  3249     return _failures;
  3252   void work(uint worker_id) {
  3253     HandleMark hm;
  3254     VerifyRegionClosure blk(true, _vo);
  3255     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
  3256                                           _g1h->workers()->active_workers(),
  3257                                           HeapRegion::ParVerifyClaimValue);
  3258     if (blk.failures()) {
  3259       _failures = true;
  3262 };
  3264 void G1CollectedHeap::verify(bool silent) {
  3265   verify(silent, VerifyOption_G1UsePrevMarking);
  3268 void G1CollectedHeap::verify(bool silent,
  3269                              VerifyOption vo) {
  3270   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  3271     if (!silent) { gclog_or_tty->print("Roots "); }
  3272     VerifyRootsClosure rootsCl(vo);
  3274     assert(Thread::current()->is_VM_thread(),
  3275       "Expected to be executed serially by the VM thread at this point");
  3277     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
  3278     VerifyKlassClosure klassCl(this, &rootsCl);
  3280     // We apply the relevant closures to all the oops in the
  3281     // system dictionary, the string table and the code cache.
  3282     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
  3284     // Need cleared claim bits for the strong roots processing
  3285     ClassLoaderDataGraph::clear_claimed_marks();
  3287     process_strong_roots(true,      // activate StrongRootsScope
  3288                          false,     // we set "is scavenging" to false,
  3289                                     // so we don't reset the dirty cards.
  3290                          ScanningOption(so),  // roots scanning options
  3291                          &rootsCl,
  3292                          &blobsCl,
  3293                          &klassCl
  3294                          );
  3296     bool failures = rootsCl.failures();
  3298     if (vo != VerifyOption_G1UseMarkWord) {
  3299       // If we're verifying during a full GC then the region sets
  3300       // will have been torn down at the start of the GC. Therefore
  3301       // verifying the region sets will fail. So we only verify
  3302       // the region sets when not in a full GC.
  3303       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
  3304       verify_region_sets();
  3307     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  3308     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  3309       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3310              "sanity check");
  3312       G1ParVerifyTask task(this, vo);
  3313       assert(UseDynamicNumberOfGCThreads ||
  3314         workers()->active_workers() == workers()->total_workers(),
  3315         "If not dynamic should be using all the workers");
  3316       int n_workers = workers()->active_workers();
  3317       set_par_threads(n_workers);
  3318       workers()->run_task(&task);
  3319       set_par_threads(0);
  3320       if (task.failures()) {
  3321         failures = true;
  3324       // Checks that the expected amount of parallel work was done.
  3325       // The implication is that n_workers is > 0.
  3326       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  3327              "sanity check");
  3329       reset_heap_region_claim_values();
  3331       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3332              "sanity check");
  3333     } else {
  3334       VerifyRegionClosure blk(false, vo);
  3335       heap_region_iterate(&blk);
  3336       if (blk.failures()) {
  3337         failures = true;
  3340     if (!silent) gclog_or_tty->print("RemSet ");
  3341     rem_set()->verify();
  3343     if (failures) {
  3344       gclog_or_tty->print_cr("Heap:");
  3345       // It helps to have the per-region information in the output to
  3346       // help us track down what went wrong. This is why we call
  3347       // print_extended_on() instead of print_on().
  3348       print_extended_on(gclog_or_tty);
  3349       gclog_or_tty->print_cr("");
  3350 #ifndef PRODUCT
  3351       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  3352         concurrent_mark()->print_reachable("at-verification-failure",
  3353                                            vo, false /* all */);
  3355 #endif
  3356       gclog_or_tty->flush();
  3358     guarantee(!failures, "there should not have been any failures");
  3359   } else {
  3360     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  3364 class PrintRegionClosure: public HeapRegionClosure {
  3365   outputStream* _st;
  3366 public:
  3367   PrintRegionClosure(outputStream* st) : _st(st) {}
  3368   bool doHeapRegion(HeapRegion* r) {
  3369     r->print_on(_st);
  3370     return false;
  3372 };
  3374 void G1CollectedHeap::print_on(outputStream* st) const {
  3375   st->print(" %-20s", "garbage-first heap");
  3376   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  3377             capacity()/K, used_unlocked()/K);
  3378   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  3379             _g1_storage.low_boundary(),
  3380             _g1_storage.high(),
  3381             _g1_storage.high_boundary());
  3382   st->cr();
  3383   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  3384   uint young_regions = _young_list->length();
  3385   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
  3386             (size_t) young_regions * HeapRegion::GrainBytes / K);
  3387   uint survivor_regions = g1_policy()->recorded_survivor_regions();
  3388   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
  3389             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  3390   st->cr();
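// Illustrative output (made-up sizes and addresses), as produced by the
// format strings above:
//
//  garbage-first heap   total 262144K, used 131072K [0xe0000000, 0xf0000000, 0x100000000)
//   region size 1024K, 50 young (51200K), 5 survivors (5120K)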
  3393 void G1CollectedHeap::print_extended_on(outputStream* st) const {
  3394   print_on(st);
  3396   // Print the per-region information.
  3397   st->cr();
  3398   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
  3399                "HS=humongous(starts), HC=humongous(continues), "
  3400                "CS=collection set, F=free, TS=gc time stamp, "
  3401                "PTAMS=previous top-at-mark-start, "
  3402                "NTAMS=next top-at-mark-start)");
  3403   PrintRegionClosure blk(st);
  3404   heap_region_iterate(&blk);
  3407 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3408   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3409     workers()->print_worker_threads_on(st);
  3411   _cmThread->print_on(st);
  3412   st->cr();
  3413   _cm->print_worker_threads_on(st);
  3414   _cg1r->print_worker_threads_on(st);
  3417 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3418   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3419     workers()->threads_do(tc);
  3421   tc->do_thread(_cmThread);
  3422   _cg1r->threads_do(tc);
  3425 void G1CollectedHeap::print_tracing_info() const {
  3426   // We'll overload this to mean "trace GC pause statistics."
  3427   if (TraceGen0Time || TraceGen1Time) {
  3428     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  3429     // to that.
  3430     g1_policy()->print_tracing_info();
  3432   if (G1SummarizeRSetStats) {
  3433     g1_rem_set()->print_summary_info();
  3435   if (G1SummarizeConcMark) {
  3436     concurrent_mark()->print_summary_info();
  3438   g1_policy()->print_yg_surv_rate_info();
  3439   SpecializationStats::print();
  3442 #ifndef PRODUCT
  3443 // Helpful for debugging RSet issues.
  3445 class PrintRSetsClosure : public HeapRegionClosure {
  3446 private:
  3447   const char* _msg;
  3448   size_t _occupied_sum;
  3450 public:
  3451   bool doHeapRegion(HeapRegion* r) {
  3452     HeapRegionRemSet* hrrs = r->rem_set();
  3453     size_t occupied = hrrs->occupied();
  3454     _occupied_sum += occupied;
  3456     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
  3457                            HR_FORMAT_PARAMS(r));
  3458     if (occupied == 0) {
  3459       gclog_or_tty->print_cr("  RSet is empty");
  3460     } else {
  3461       hrrs->print();
  3463     gclog_or_tty->print_cr("----------");
  3464     return false;
  3467   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
  3468     gclog_or_tty->cr();
  3469     gclog_or_tty->print_cr("========================================");
  3470     gclog_or_tty->print_cr(msg);
  3471     gclog_or_tty->cr();
  3474   ~PrintRSetsClosure() {
  3475     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
  3476     gclog_or_tty->print_cr("========================================");
  3477     gclog_or_tty->cr();
  3479 };
  3481 void G1CollectedHeap::print_cset_rsets() {
  3482   PrintRSetsClosure cl("Printing CSet RSets");
  3483   collection_set_iterate(&cl);
  3486 void G1CollectedHeap::print_all_rsets() {
  3487   PrintRSetsClosure cl("Printing All RSets");
  3488   heap_region_iterate(&cl);
  3490 #endif // PRODUCT
  3492 G1CollectedHeap* G1CollectedHeap::heap() {
  3493   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  3494          "not a garbage-first heap");
  3495   return _g1h;
  3498 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  3499   // always_do_update_barrier = false;
  3500   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  3501   // Call allocation profiler
  3502   AllocationProfiler::iterate_since_last_gc();
  3503   // Fill TLABs and such
  3504   ensure_parsability(true);
  3507 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  3508   // FIXME: what is this about?
  3509   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  3510   // is set.
  3511   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  3512                         "derived pointer present"));
  3513   // always_do_update_barrier = true;
  3515   // We have just completed a GC. Update the soft reference
  3516   // policy with the new heap occupancy
  3517   Universe::update_heap_info_at_gc();
  3520 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  3521                                                unsigned int gc_count_before,
  3522                                                bool* succeeded) {
  3523   assert_heap_not_locked_and_not_at_safepoint();
  3524   g1_policy()->record_stop_world_start();
  3525   VM_G1IncCollectionPause op(gc_count_before,
  3526                              word_size,
  3527                              false, /* should_initiate_conc_mark */
  3528                              g1_policy()->max_pause_time_ms(),
  3529                              GCCause::_g1_inc_collection_pause);
  3530   VMThread::execute(&op);
  3532   HeapWord* result = op.result();
  3533   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  3534   assert(result == NULL || ret_succeeded,
  3535          "the result should be NULL if the VM did not succeed");
  3536   *succeeded = ret_succeeded;
  3538   assert_heap_not_locked();
  3539   return result;
  3542 void
  3543 G1CollectedHeap::doConcurrentMark() {
  3544   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  3545   if (!_cmThread->in_progress()) {
  3546     _cmThread->set_started();
  3547     CGC_lock->notify();
  3551 size_t G1CollectedHeap::pending_card_num() {
  3552   size_t extra_cards = 0;
  3553   JavaThread *curr = Threads::first();
  3554   while (curr != NULL) {
  3555     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3556     extra_cards += dcq.size();
  3557     curr = curr->next();
  3559   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3560   size_t buffer_size = dcqs.buffer_size();
  3561   size_t buffer_num = dcqs.completed_buffers_num();
  3563   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  3564   // in bytes - not the number of 'entries'. We need to convert
  3565   // into a number of cards.
  3566   return (buffer_size * buffer_num + extra_cards) / oopSize;
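  // For example (made-up numbers, 64-bit so oopSize == 8): with
  // buffer_size == 2048 bytes, buffer_num == 4 and extra_cards == 512
  // bytes of thread-local entries, this reports
  // (2048 * 4 + 512) / 8 == 1088 pending cards.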
  3569 size_t G1CollectedHeap::cards_scanned() {
  3570   return g1_rem_set()->cardsScanned();
  3573 void
  3574 G1CollectedHeap::setup_surviving_young_words() {
  3575   assert(_surviving_young_words == NULL, "pre-condition");
  3576   uint array_length = g1_policy()->young_cset_region_length();
  3577   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  3578   if (_surviving_young_words == NULL) {
  3579     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  3580                           "Not enough space for young surv words summary.");
  3582   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
  3583 #ifdef ASSERT
  3584   for (uint i = 0;  i < array_length; ++i) {
  3585     assert( _surviving_young_words[i] == 0, "memset above" );
  3587 #endif // ASSERT
  3590 void
  3591 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3592   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3593   uint array_length = g1_policy()->young_cset_region_length();
  3594   for (uint i = 0; i < array_length; ++i) {
  3595     _surviving_young_words[i] += surv_young_words[i];
  3599 void
  3600 G1CollectedHeap::cleanup_surviving_young_words() {
  3601   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3602   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
  3603   _surviving_young_words = NULL;
  3606 #ifdef ASSERT
  3607 class VerifyCSetClosure: public HeapRegionClosure {
  3608 public:
  3609   bool doHeapRegion(HeapRegion* hr) {
  3610     // Here we check that the CSet region's RSet is ready for parallel
  3611     // iteration. The fields that we'll verify are only manipulated
  3612     // when the region is part of a CSet and is collected. Afterwards,
  3613     // we reset these fields when we clear the region's RSet (when the
  3614     // region is freed) so they are ready when the region is
  3615     // re-allocated. The only exception to this is if there's an
  3616     // evacuation failure and instead of freeing the region we leave
  3617     // it in the heap. In that case, we reset these fields during
  3618     // evacuation failure handling.
  3619     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
  3621     // Here's a good place to add any other checks we'd like to
  3622     // perform on CSet regions.
  3623     return false;
  3625 };
  3626 #endif // ASSERT
  3628 #if TASKQUEUE_STATS
  3629 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3630   st->print_raw_cr("GC Task Stats");
  3631   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3632   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3635 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3636   print_taskqueue_stats_hdr(st);
  3638   TaskQueueStats totals;
  3639   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3640   for (int i = 0; i < n; ++i) {
  3641     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3642     totals += task_queue(i)->stats;
  3644   st->print_raw("tot "); totals.print(st); st->cr();
  3646   DEBUG_ONLY(totals.verify());
  3649 void G1CollectedHeap::reset_taskqueue_stats() {
  3650   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3651   for (int i = 0; i < n; ++i) {
  3652     task_queue(i)->stats.reset();
  3655 #endif // TASKQUEUE_STATS
  3657 void G1CollectedHeap::log_gc_header() {
  3658   if (!G1Log::fine()) {
  3659     return;
  3662   gclog_or_tty->date_stamp(PrintGCDateStamps);
  3663   gclog_or_tty->stamp(PrintGCTimeStamps);
  3665   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
  3666     .append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
  3667     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
  3669   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
  3672 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
  3673   if (!G1Log::fine()) {
  3674     return;
  3677   if (G1Log::finer()) {
  3678     if (evacuation_failed()) {
  3679       gclog_or_tty->print(" (to-space exhausted)");
  3681     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  3682     g1_policy()->phase_times()->note_gc_end();
  3683     g1_policy()->phase_times()->print(pause_time_sec);
  3684     g1_policy()->print_detailed_heap_transition();
  3685   } else {
  3686     if (evacuation_failed()) {
  3687       gclog_or_tty->print("--");
  3689     g1_policy()->print_heap_transition();
  3690     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
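// Taken together with log_gc_header(), the "fine" level output looks
// roughly like this (illustrative numbers):
//   [GC pause (young) 102M->24M(256M), 0.0464841 secs]
// with the per-phase times and a detailed heap transition added at "finer".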
  3694 bool
  3695 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3696   assert_at_safepoint(true /* should_be_vm_thread */);
  3697   guarantee(!is_gc_active(), "collection is not reentrant");
  3699   if (GC_locker::check_active_before_gc()) {
  3700     return false;
  3703   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3704   ResourceMark rm;
  3706   print_heap_before_gc();
  3708   HRSPhaseSetter x(HRSPhaseEvacuation);
  3709   verify_region_sets_optional();
  3710   verify_dirty_young_regions();
  3712   // This call will decide whether this pause is an initial-mark
  3713   // pause. If it is, during_initial_mark_pause() will return true
  3714   // for the duration of this pause.
  3715   g1_policy()->decide_on_conc_mark_initiation();
  3717   // We do not allow initial-mark to be piggy-backed on a mixed GC.
  3718   assert(!g1_policy()->during_initial_mark_pause() ||
  3719           g1_policy()->gcs_are_young(), "sanity");
  3721   // We also do not allow mixed GCs during marking.
  3722   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
  3724   // Record whether this pause is an initial mark. When the current
  3725   // thread has completed its logging output and it's safe to signal
  3726   // the CM thread, the flag's value in the policy has been reset.
  3727   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
  3729   // Inner scope for scope based logging, timers, and stats collection
  3731     if (g1_policy()->during_initial_mark_pause()) {
  3732       // We are about to start a marking cycle, so we increment the
  3733       // full collection counter.
  3734       increment_old_marking_cycles_started();
  3736     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  3738     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  3739                                 workers()->active_workers() : 1);
  3740     double pause_start_sec = os::elapsedTime();
  3741     g1_policy()->phase_times()->note_gc_start(active_workers);
  3742     log_gc_header();
  3744     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  3745     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  3747     // If the secondary_free_list is not empty, append it to the
  3748     // free_list. No need to wait for the cleanup operation to finish;
  3749     // the region allocation code will check the secondary_free_list
  3750     // and wait if necessary. If the G1StressConcRegionFreeing flag is
  3751     // set, skip this step so that the region allocation code has to
  3752     // get entries from the secondary_free_list.
  3753     if (!G1StressConcRegionFreeing) {
  3754       append_secondary_free_list_if_not_empty_with_lock();
  3757     assert(check_young_list_well_formed(),
  3758       "young list should be well formed");
  3760     // Don't dynamically change the number of GC threads this early.  A value of
  3761     // 0 is used to indicate serial work.  When parallel work is done,
  3762     // it will be set.
  3764     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3765       IsGCActiveMark x;
  3767       gc_prologue(false);
  3768       increment_total_collections(false /* full gc */);
  3769       increment_gc_time_stamp();
  3771       verify_before_gc();
  3773       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3775       // Please see comment in g1CollectedHeap.hpp and
  3776       // G1CollectedHeap::ref_processing_init() to see how
  3777       // reference processing currently works in G1.
  3779       // Enable discovery in the STW reference processor
  3780       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
  3781                                             true /*verify_no_refs*/);
  3784         // We want to temporarily turn off discovery by the
  3785         // CM ref processor, if necessary, and turn it back on
  3786         // again later if we do. Using a scoped
  3787         // NoRefDiscovery object will do this.
  3788         NoRefDiscovery no_cm_discovery(ref_processor_cm());
  3790         // Forget the current alloc region (we might even choose it to be part
  3791         // of the collection set!).
  3792         release_mutator_alloc_region();
  3794         // We should call this after we retire the mutator alloc
  3795         // region(s) so that all the ALLOC / RETIRE events are generated
  3796         // before the start GC event.
  3797         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
  3799         // This timing is only used by the ergonomics to handle our pause target.
  3800         // It is unclear why this should not include the full pause. We will
  3801         // investigate this in CR 7178365.
  3802         //
  3803         // Preserving the old comment here if that helps the investigation:
  3804         //
  3805         // The elapsed time induced by the start time below deliberately elides
  3806         // the possible verification above.
  3807         double sample_start_time_sec = os::elapsedTime();
  3808         size_t start_used_bytes = used();
  3810 #if YOUNG_LIST_VERBOSE
  3811         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  3812         _young_list->print();
  3813         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3814 #endif // YOUNG_LIST_VERBOSE
  3816         g1_policy()->record_collection_pause_start(sample_start_time_sec,
  3817                                                    start_used_bytes);
  3819         double scan_wait_start = os::elapsedTime();
  3820         // We have to wait until the CM threads finish scanning the
  3821         // root regions as it's the only way to ensure that all the
  3822         // objects on them have been correctly scanned before we start
  3823         // moving them during the GC.
  3824         bool waited = _cm->root_regions()->wait_until_scan_finished();
  3825         double wait_time_ms = 0.0;
  3826         if (waited) {
  3827           double scan_wait_end = os::elapsedTime();
  3828           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  3830         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
  3832 #if YOUNG_LIST_VERBOSE
  3833         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  3834         _young_list->print();
  3835 #endif // YOUNG_LIST_VERBOSE
  3837         if (g1_policy()->during_initial_mark_pause()) {
  3838           concurrent_mark()->checkpointRootsInitialPre();
  3841 #if YOUNG_LIST_VERBOSE
  3842         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  3843         _young_list->print();
  3844         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3845 #endif // YOUNG_LIST_VERBOSE
  3847         g1_policy()->finalize_cset(target_pause_time_ms);
  3849         _cm->note_start_of_gc();
  3850         // We should not verify the per-thread SATB buffers given that
  3851         // we have not filtered them yet (we'll do so during the
  3852         // GC). We also call this after finalize_cset() to
  3853         // ensure that the CSet has been finalized.
  3854         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  3855                                  true  /* verify_enqueued_buffers */,
  3856                                  false /* verify_thread_buffers */,
  3857                                  true  /* verify_fingers */);
  3859         if (_hr_printer.is_active()) {
  3860           HeapRegion* hr = g1_policy()->collection_set();
  3861           while (hr != NULL) {
  3862             G1HRPrinter::RegionType type;
  3863             if (!hr->is_young()) {
  3864               type = G1HRPrinter::Old;
  3865             } else if (hr->is_survivor()) {
  3866               type = G1HRPrinter::Survivor;
  3867             } else {
  3868               type = G1HRPrinter::Eden;
  3870             _hr_printer.cset(hr);
  3871             hr = hr->next_in_collection_set();
  3875 #ifdef ASSERT
  3876         VerifyCSetClosure cl;
  3877         collection_set_iterate(&cl);
  3878 #endif // ASSERT
  3880         setup_surviving_young_words();
  3882         // Initialize the GC alloc regions.
  3883         init_gc_alloc_regions();
  3885         // Actually do the work...
  3886         evacuate_collection_set();
  3888         // We do this to mainly verify the per-thread SATB buffers
  3889         // (which have been filtered by now) since we didn't verify
  3890         // them earlier. No point in re-checking the stacks / enqueued
  3891         // buffers given that the CSet has not changed since last time
  3892         // we checked.
  3893         _cm->verify_no_cset_oops(false /* verify_stacks */,
  3894                                  false /* verify_enqueued_buffers */,
  3895                                  true  /* verify_thread_buffers */,
  3896                                  true  /* verify_fingers */);
  3898         free_collection_set(g1_policy()->collection_set());
  3899         g1_policy()->clear_collection_set();
  3901         cleanup_surviving_young_words();
  3903         // Start a new incremental collection set for the next pause.
  3904         g1_policy()->start_incremental_cset_building();
  3906         // Clear the _cset_fast_test bitmap in anticipation of adding
  3907         // regions to the incremental collection set for the next
  3908         // evacuation pause.
  3909         clear_cset_fast_test();
  3911         _young_list->reset_sampled_info();
  3913         // Don't check the whole heap at this point as the
  3914         // GC alloc regions from this pause have been tagged
  3915         // as survivors and moved on to the survivor list.
  3916         // Survivor regions will fail the !is_young() check.
  3917         assert(check_young_list_empty(false /* check_heap */),
  3918           "young list should be empty");
  3920 #if YOUNG_LIST_VERBOSE
  3921         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  3922         _young_list->print();
  3923 #endif // YOUNG_LIST_VERBOSE
  3925         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  3926                                             _young_list->first_survivor_region(),
  3927                                             _young_list->last_survivor_region());
  3929         _young_list->reset_auxilary_lists();
  3931         if (evacuation_failed()) {
  3932           _summary_bytes_used = recalculate_used();
  3933         } else {
  3934           // The "used" of the collection set regions has already been subtracted
  3935           // when they were freed.  Add in the bytes evacuated.
  3936           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
  3939         if (g1_policy()->during_initial_mark_pause()) {
  3940           // We have to do this before we notify the CM threads that
  3941           // they can start working to make sure that all the
  3942           // appropriate initialization is done on the CM object.
  3943           concurrent_mark()->checkpointRootsInitialPost();
  3944           set_marking_started();
  3945           // Note that we don't actually trigger the CM thread at
  3946           // this point. We do that later when we're sure that
  3947           // the current thread has completed its logging output.
  3950         allocate_dummy_regions();
  3952 #if YOUNG_LIST_VERBOSE
  3953         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  3954         _young_list->print();
  3955         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3956 #endif // YOUNG_LIST_VERBOSE
  3958         init_mutator_alloc_region();
  3961           size_t expand_bytes = g1_policy()->expansion_amount();
  3962           if (expand_bytes > 0) {
  3963             size_t bytes_before = capacity();
  3964             // No need for an ergo verbose message here,
  3965             // expansion_amount() does this when it returns a value > 0.
  3966             if (!expand(expand_bytes)) {
  3967               // We failed to expand the heap so let's verify that
  3968               // committed/uncommitted amounts match the backing store
  3969               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  3970               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  3975         // We redo the verification but now with respect to the new CSet which
  3976         // has just got initialized after the previous CSet was freed.
  3977         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  3978                                  true  /* verify_enqueued_buffers */,
  3979                                  true  /* verify_thread_buffers */,
  3980                                  true  /* verify_fingers */);
  3981         _cm->note_end_of_gc();
  3983         // This timing is only used by the ergonomics to handle our pause target.
  3984         // It is unclear why this should not include the full pause. We will
  3985         // investigate this in CR 7178365.
  3986         double sample_end_time_sec = os::elapsedTime();
  3987         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
  3988         g1_policy()->record_collection_pause_end(pause_time_ms);
  3990         MemoryService::track_memory_usage();
  3992         // In prepare_for_verify() below we'll need to scan the deferred
  3993         // update buffers to bring the RSets up-to-date if
  3994         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  3995         // the update buffers we'll probably need to scan cards on the
  3996         // regions we just allocated to (i.e., the GC alloc
  3997         // regions). However, during the last GC we called
  3998         // set_saved_mark() on all the GC alloc regions, so card
  3999         // scanning might skip the [saved_mark_word()...top()] area of
  4000         // those regions (i.e., the area we allocated objects into
  4001         // during the last GC). But it shouldn't. Given that
  4002         // saved_mark_word() is conditional on whether the GC time stamp
  4003         // on the region is current or not, by incrementing the GC time
  4004         // stamp here we invalidate all the GC time stamps on all the
  4005         // regions and saved_mark_word() will simply return top() for
  4006         // all the regions. This is a nicer way of ensuring this rather
  4007         // than iterating over the regions and fixing them. In fact, the
  4008         // GC time stamp increment here also ensures that
  4009         // saved_mark_word() will return top() between pauses, i.e.,
  4010         // during concurrent refinement. So we don't need the
  4011         // is_gc_active() check to decide which top to use when
  4012         // scanning cards (see CR 7039627).
  4013         increment_gc_time_stamp();
  4015         verify_after_gc();
  4017         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  4018         ref_processor_stw()->verify_no_references_recorded();
  4020         // CM reference discovery will be re-enabled if necessary.
  4023       // We should do this after we potentially expand the heap so
  4024       // that all the COMMIT events are generated before the end GC
  4025       // event, and after we retire the GC alloc regions so that all
  4026       // RETIRE events are generated before the end GC event.
  4027       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
  4029       if (mark_in_progress()) {
  4030         concurrent_mark()->update_g1_committed();
  4033 #ifdef TRACESPINNING
  4034       ParallelTaskTerminator::print_termination_counts();
  4035 #endif
  4037       gc_epilogue(false);
  4039       log_gc_footer(os::elapsedTime() - pause_start_sec);
  4042     // It is not yet safe to tell the concurrent mark thread to
  4043     // start as we have some optional output below. We don't want the
  4044     // output from the concurrent mark thread interfering with this
  4045     // logging output either.
  4047     _hrs.verify_optional();
  4048     verify_region_sets_optional();
  4050     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  4051     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  4053     print_heap_after_gc();
  4055     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  4056     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  4057     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  4058     // before any GC notifications are raised.
  4059     g1mm()->update_sizes();
  4062   if (G1SummarizeRSetStats &&
  4063       (G1SummarizeRSetStatsPeriod > 0) &&
  4064       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  4065     g1_rem_set()->print_summary_info();
  4068   // It should now be safe to tell the concurrent mark thread to start
  4069   // without its logging output interfering with the logging output
  4070   // that came from the pause.
  4072   if (should_start_conc_mark) {
  4073     // CAUTION: after the doConcurrentMark() call below,
  4074     // the concurrent marking thread(s) could be running
  4075     // concurrently with us. Make sure that anything after
  4076     // this point does not assume that we are the only GC thread
  4077     // running. Note: of course, the actual marking work will
  4078     // not start until the safepoint itself is released in
  4079     // ConcurrentGCThread::safepoint_desynchronize().
  4080     doConcurrentMark();
  4083   return true;
  4086 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  4088   size_t gclab_word_size;
  4089   switch (purpose) {
  4090     case GCAllocForSurvived:
  4091       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
  4092       break;
  4093     case GCAllocForTenured:
  4094       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4095       break;
  4096     default:
  4097       assert(false, "unknown GCAllocPurpose");
  4098       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4099       break;
  4102   // Prevent humongous PLAB sizes for two reasons:
  4103   // * PLABs are allocated using paths similar to those for oops, but should
  4104   //   never be in a humongous region
  4105   // * Allowing humongous PLABs needlessly churns the region free lists
  4106   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
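  // A tiny standalone restatement of the clamping rule above (sketch only;
  // the helper name and the plain size_t arithmetic are hypothetical).
#if 0
  static size_t clamp_plab_size(size_t desired_words, size_t humongous_threshold_words) {
    // Never hand out a PLAB large enough to be treated as a humongous object.
    return desired_words < humongous_threshold_words ? desired_words
                                                     : humongous_threshold_words;
  }
#endif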
  4109 void G1CollectedHeap::init_mutator_alloc_region() {
  4110   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  4111   _mutator_alloc_region.init();
  4114 void G1CollectedHeap::release_mutator_alloc_region() {
  4115   _mutator_alloc_region.release();
  4116   assert(_mutator_alloc_region.get() == NULL, "post-condition");
  4119 void G1CollectedHeap::init_gc_alloc_regions() {
  4120   assert_at_safepoint(true /* should_be_vm_thread */);
  4122   _survivor_gc_alloc_region.init();
  4123   _old_gc_alloc_region.init();
  4124   HeapRegion* retained_region = _retained_old_gc_alloc_region;
  4125   _retained_old_gc_alloc_region = NULL;
  4127   // We will discard the current GC alloc region if:
  4128   // a) it's in the collection set (it can happen!),
  4129   // b) it's already full (no point in using it),
  4130   // c) it's empty (this means that it was emptied during
  4131   // a cleanup and it should be on the free list now), or
  4132   // d) it's humongous (this means that it was emptied
  4133   // during a cleanup and was added to the free list, but
  4134   // has been subsequently used to allocate a humongous
  4135   // object that may be less than the region size).
  4136   if (retained_region != NULL &&
  4137       !retained_region->in_collection_set() &&
  4138       !(retained_region->top() == retained_region->end()) &&
  4139       !retained_region->is_empty() &&
  4140       !retained_region->isHumongous()) {
  4141     retained_region->set_saved_mark();
  4142     // The retained region was added to the old region set when it was
  4143     // retired. We have to remove it now, since we don't allow regions
  4144     // we allocate to in the region sets. We'll re-add it later, when
  4145     // it's retired again.
  4146     _old_set.remove(retained_region);
  4147     bool during_im = g1_policy()->during_initial_mark_pause();
  4148     retained_region->note_start_of_copying(during_im);
  4149     _old_gc_alloc_region.set(retained_region);
  4150     _hr_printer.reuse(retained_region);
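    // The reuse test above, restated as a standalone predicate (sketch only;
    // the helper is hypothetical, the accessors mirror the calls used above).
#if 0
    static bool can_reuse_retained_region(HeapRegion* r) {
      return r != NULL &&
             !r->in_collection_set() &&   // (a) not in the CSet
             r->top() != r->end() &&      // (b) not already full
             !r->is_empty() &&            // (c) not emptied during a cleanup
             !r->isHumongous();           // (d) not reused for a humongous object
    }
#endif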
  4154 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
  4155   _survivor_gc_alloc_region.release();
  4156   // If we have an old GC alloc region to release, we'll save it in
  4157   // _retained_old_gc_alloc_region. If we don't,
  4158   // _retained_old_gc_alloc_region will become NULL. This is what we
  4159   // want either way so no reason to check explicitly for either
  4160   // condition.
  4161   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
  4163   if (ResizePLAB) {
  4164     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  4165     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  4169 void G1CollectedHeap::abandon_gc_alloc_regions() {
  4170   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
  4171   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
  4172   _retained_old_gc_alloc_region = NULL;
  4175 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  4176   _drain_in_progress = false;
  4177   set_evac_failure_closure(cl);
  4178   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  4181 void G1CollectedHeap::finalize_for_evac_failure() {
  4182   assert(_evac_failure_scan_stack != NULL &&
  4183          _evac_failure_scan_stack->length() == 0,
  4184          "Postcondition");
  4185   assert(!_drain_in_progress, "Postcondition");
  4186   delete _evac_failure_scan_stack;
  4187   _evac_failure_scan_stack = NULL;
  4190 void G1CollectedHeap::remove_self_forwarding_pointers() {
  4191   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4193   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
  4195   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4196     set_par_threads();
  4197     workers()->run_task(&rsfp_task);
  4198     set_par_threads(0);
  4199   } else {
  4200     rsfp_task.work(0);
  4203   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
  4205   // Reset the claim values in the regions in the collection set.
  4206   reset_cset_heap_region_claim_values();
  4208   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4210   // Now restore saved marks, if any.
  4211   if (_objs_with_preserved_marks != NULL) {
  4212     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  4213     guarantee(_objs_with_preserved_marks->length() ==
  4214               _preserved_marks_of_objs->length(), "Both or none.");
  4215     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  4216       oop obj   = _objs_with_preserved_marks->at(i);
  4217       markOop m = _preserved_marks_of_objs->at(i);
  4218       obj->set_mark(m);
  4221     // Delete the preserved marks growable arrays (allocated on the C heap).
  4222     delete _objs_with_preserved_marks;
  4223     delete _preserved_marks_of_objs;
  4224     _objs_with_preserved_marks = NULL;
  4225     _preserved_marks_of_objs = NULL;
  4229 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  4230   _evac_failure_scan_stack->push(obj);
  4233 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  4234   assert(_evac_failure_scan_stack != NULL, "precondition");
  4236   while (_evac_failure_scan_stack->length() > 0) {
  4237      oop obj = _evac_failure_scan_stack->pop();
  4238      _evac_failure_closure->set_region(heap_region_containing(obj));
  4239      obj->oop_iterate_backwards(_evac_failure_closure);
  4243 oop
  4244 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  4245                                                oop old) {
  4246   assert(obj_in_cs(old),
  4247          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
  4248                  (HeapWord*) old));
  4249   markOop m = old->mark();
  4250   oop forward_ptr = old->forward_to_atomic(old);
  4251   if (forward_ptr == NULL) {
  4252     // Forward-to-self succeeded.
  4254     if (_evac_failure_closure != cl) {
  4255       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  4256       assert(!_drain_in_progress,
  4257              "Should only be true while someone holds the lock.");
  4258       // Set the global evac-failure closure to the current thread's.
  4259       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  4260       set_evac_failure_closure(cl);
  4261       // Now do the common part.
  4262       handle_evacuation_failure_common(old, m);
  4263       // Reset to NULL.
  4264       set_evac_failure_closure(NULL);
  4265     } else {
  4266       // The lock is already held, and this is recursive.
  4267       assert(_drain_in_progress, "This should only be the recursive case.");
  4268       handle_evacuation_failure_common(old, m);
  4270     return old;
  4271   } else {
  4272     // Forward-to-self failed. Either someone else managed to allocate
  4273     // space for this object (old != forward_ptr) or they beat us in
  4274     // self-forwarding it (old == forward_ptr).
  4275     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
  4276            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
  4277                    "should not be in the CSet",
  4278                    (HeapWord*) old, (HeapWord*) forward_ptr));
  4279     return forward_ptr;
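    // Sketch of the race being resolved here (illustrative only): the CAS in
    // forward_to_atomic() guarantees exactly one winner per object, whether
    // that winner copied it or self-forwarded it.
#if 0
    oop prior = old->forward_to_atomic(old);
    if (prior == NULL) {
      // We won the CAS: 'old' now forwards to itself and stays in place.
    } else {
      // Another worker got there first; 'prior' is its copy, or 'old' itself
      // if that worker also failed to evacuate and self-forwarded it.
    }
#endif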
  4283 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  4284   set_evacuation_failed(true);
  4286   preserve_mark_if_necessary(old, m);
  4288   HeapRegion* r = heap_region_containing(old);
  4289   if (!r->evacuation_failed()) {
  4290     r->set_evacuation_failed(true);
  4291     _hr_printer.evac_failure(r);
  4294   push_on_evac_failure_scan_stack(old);
  4296   if (!_drain_in_progress) {
  4297     // prevent recursion in copy_to_survivor_space()
  4298     _drain_in_progress = true;
  4299     drain_evac_failure_scan_stack();
  4300     _drain_in_progress = false;
  4304 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  4305   assert(evacuation_failed(), "Oversaving!");
  4306   // We want to call the "for_promotion_failure" version only in the
  4307   // case of a promotion failure.
  4308   if (m->must_be_preserved_for_promotion_failure(obj)) {
  4309     if (_objs_with_preserved_marks == NULL) {
  4310       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  4311       _objs_with_preserved_marks =
  4312         new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  4313       _preserved_marks_of_objs =
  4314         new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true);
  4316     _objs_with_preserved_marks->push(obj);
  4317     _preserved_marks_of_objs->push(m);
  4321 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4322                                                   size_t word_size) {
  4323   if (purpose == GCAllocForSurvived) {
  4324     HeapWord* result = survivor_attempt_allocation(word_size);
  4325     if (result != NULL) {
  4326       return result;
  4327     } else {
  4328       // Let's try to allocate in the old gen in case we can fit the
  4329       // object there.
  4330       return old_attempt_allocation(word_size);
  4332   } else {
  4333     assert(purpose ==  GCAllocForTenured, "sanity");
  4334     HeapWord* result = old_attempt_allocation(word_size);
  4335     if (result != NULL) {
  4336       return result;
  4337     } else {
  4338       // Let's try to allocate in the survivors in case we can fit the
  4339       // object there.
  4340       return survivor_attempt_allocation(word_size);
  4344   ShouldNotReachHere();
  4345   // Trying to keep some compilers happy.
  4346   return NULL;
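  // The allocation policy above, reduced to its shape (sketch only; the
  // function-pointer helper is hypothetical).
#if 0
  static HeapWord* try_with_fallback(HeapWord* (*preferred)(size_t),
                                     HeapWord* (*fallback)(size_t),
                                     size_t word_size) {
    HeapWord* result = preferred(word_size);
    return (result != NULL) ? result : fallback(word_size);
  }
#endif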
  4349 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  4350   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
  4352 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
  4353   : _g1h(g1h),
  4354     _refs(g1h->task_queue(queue_num)),
  4355     _dcq(&g1h->dirty_card_queue_set()),
  4356     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  4357     _g1_rem(g1h->g1_rem_set()),
  4358     _hash_seed(17), _queue_num(queue_num),
  4359     _term_attempts(0),
  4360     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  4361     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
  4362     _age_table(false),
  4363     _strong_roots_time(0), _term_time(0),
  4364     _alloc_buffer_waste(0), _undo_waste(0) {
  4365   // we allocate G1YoungSurvRateNumRegions plus one entries, since
  4366   // we "sacrifice" entry 0 to keep track of surviving bytes for
  4367   // non-young regions (where the age is -1)
  4368   // We also add a few elements at the beginning and at the end in
  4369   // an attempt to eliminate cache contention
  4370   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  4371   uint array_length = PADDING_ELEM_NUM +
  4372                       real_length +
  4373                       PADDING_ELEM_NUM;
  4374   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  4375   if (_surviving_young_words_base == NULL)
  4376     vm_exit_out_of_memory(array_length * sizeof(size_t),
  4377                           "Not enough space for young surv histo.");
  4378   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  4379   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
  4381   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  4382   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  4384   _start = os::elapsedTime();
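  // Index layout of _surviving_young_words, as described above (sketch only;
  // the local variable below is hypothetical).
#if 0
  //
  //   base: [ PADDING | slot 0 | slot 1 ... slot N | PADDING ]
  //
  //   slot 0      <- non-young regions (young_index_in_cset() == -1)
  //   slot i + 1  <- young CSet region with young_index_in_cset() == i
  //
  // which is why copy_to_survivor_space() indexes the array with
  // young_index_in_cset() + 1 ("+1 to make the -1 indexes valid").
  size_t slot = (size_t) (young_index_in_cset + 1);
#endif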
  4387 void
  4388 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  4390   st->print_raw_cr("GC Termination Stats");
  4391   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  4392                    " ------waste (KiB)------");
  4393   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  4394                    "  total   alloc    undo");
  4395   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  4396                    " ------- ------- -------");
  4399 void
  4400 G1ParScanThreadState::print_termination_stats(int i,
  4401                                               outputStream* const st) const
  4403   const double elapsed_ms = elapsed_time() * 1000.0;
  4404   const double s_roots_ms = strong_roots_time() * 1000.0;
  4405   const double term_ms    = term_time() * 1000.0;
  4406   st->print_cr("%3d %9.2f %9.2f %6.2f "
  4407                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
  4408                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
  4409                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
  4410                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
  4411                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
  4412                alloc_buffer_waste() * HeapWordSize / K,
  4413                undo_waste() * HeapWordSize / K);
  4416 #ifdef ASSERT
  4417 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  4418   assert(ref != NULL, "invariant");
  4419   assert(UseCompressedOops, "sanity");
  4420   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  4421   oop p = oopDesc::load_decode_heap_oop(ref);
  4422   assert(_g1h->is_in_g1_reserved(p),
  4423          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4424   return true;
  4427 bool G1ParScanThreadState::verify_ref(oop* ref) const {
  4428   assert(ref != NULL, "invariant");
  4429   if (has_partial_array_mask(ref)) {
  4430     // Must be in the collection set--it's already been copied.
  4431     oop p = clear_partial_array_mask(ref);
  4432     assert(_g1h->obj_in_cs(p),
  4433            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4434   } else {
  4435     oop p = oopDesc::load_decode_heap_oop(ref);
  4436     assert(_g1h->is_in_g1_reserved(p),
  4437            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4439   return true;
  4442 bool G1ParScanThreadState::verify_task(StarTask ref) const {
  4443   if (ref.is_narrow()) {
  4444     return verify_ref((narrowOop*) ref);
  4445   } else {
  4446     return verify_ref((oop*) ref);
  4449 #endif // ASSERT
  4451 void G1ParScanThreadState::trim_queue() {
  4452   assert(_evac_cl != NULL, "not set");
  4453   assert(_evac_failure_cl != NULL, "not set");
  4454   assert(_partial_scan_cl != NULL, "not set");
  4456   StarTask ref;
  4457   do {
  4458     // Drain the overflow stack first, so other threads can steal.
  4459     while (refs()->pop_overflow(ref)) {
  4460       deal_with_reference(ref);
  4463     while (refs()->pop_local(ref)) {
  4464       deal_with_reference(ref);
  4466   } while (!refs()->is_empty());
  4469 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
  4470                                      G1ParScanThreadState* par_scan_state) :
  4471   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  4472   _par_scan_state(par_scan_state),
  4473   _worker_id(par_scan_state->queue_num()),
  4474   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
  4475   _mark_in_progress(_g1->mark_in_progress()) { }
  4477 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4478 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
  4479 #ifdef ASSERT
  4480   HeapRegion* hr = _g1->heap_region_containing(obj);
  4481   assert(hr != NULL, "sanity");
  4482   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  4483 #endif // ASSERT
  4485   // We know that the object is not moving so it's safe to read its size.
  4486   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  4489 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4490 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  4491   ::mark_forwarded_object(oop from_obj, oop to_obj) {
  4492 #ifdef ASSERT
  4493   assert(from_obj->is_forwarded(), "from obj should be forwarded");
  4494   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  4495   assert(from_obj != to_obj, "should not be self-forwarded");
  4497   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
  4498   assert(from_hr != NULL, "sanity");
  4499   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
  4501   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
  4502   assert(to_hr != NULL, "sanity");
  4503   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
  4504 #endif // ASSERT
  4506   // The object might be in the process of being copied by another
  4507   // worker so we cannot trust that its to-space image is
  4508   // well-formed. So we have to read its size from its from-space
  4509   // image which we know should not be changing.
  4510   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
  4513 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4514 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  4515   ::copy_to_survivor_space(oop old) {
  4516   size_t word_sz = old->size();
  4517   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  4518   // +1 to make the -1 indexes valid...
  4519   int       young_index = from_region->young_index_in_cset()+1;
  4520   assert( (from_region->is_young() && young_index >  0) ||
  4521          (!from_region->is_young() && young_index == 0), "invariant" );
  4522   G1CollectorPolicy* g1p = _g1->g1_policy();
  4523   markOop m = old->mark();
  4524   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4525                                            : m->age();
  4526   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4527                                                              word_sz);
  4528   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  4529 #ifndef PRODUCT
  4530   // Should this evacuation fail?
  4531   if (_g1->evacuation_should_fail()) {
  4532     if (obj_ptr != NULL) {
  4533       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4534       obj_ptr = NULL;
  4537 #endif // !PRODUCT
  4539   if (obj_ptr == NULL) {
  4540     // This will either forward-to-self, or detect that someone else has
  4541     // installed a forwarding pointer.
  4542     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4543     return _g1->handle_evacuation_failure_par(cl, old);
  4546   oop obj = oop(obj_ptr);
  4548   // We're going to allocate linearly, so might as well prefetch ahead.
  4549   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  4551   oop forward_ptr = old->forward_to_atomic(obj);
  4552   if (forward_ptr == NULL) {
  4553     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4554     if (g1p->track_object_age(alloc_purpose)) {
  4555       // We could simply do obj->incr_age(). However, this causes a
  4556       // performance issue. obj->incr_age() will first check whether
  4557       // the object has a displaced mark by checking its mark word;
  4558       // getting the mark word from the new location of the object
  4559       // stalls. So, given that we already have the mark word and we
  4560       // are about to install it anyway, it's better to increase the
  4561       // age on the mark word, when the object does not have a
  4562       // displaced mark word. We're not expecting many objects to have
  4563       // a displaced mark word, so that case is not optimized
  4564       // further (it could be...) and we simply call obj->incr_age().
  4566       if (m->has_displaced_mark_helper()) {
  4567         // in this case, we have to install the mark word first,
  4568         // otherwise obj looks to be forwarded (the old mark word,
  4569         // which contains the forward pointer, was copied)
  4570         obj->set_mark(m);
  4571         obj->incr_age();
  4572       } else {
  4573         m = m->incr_age();
  4574         obj->set_mark(m);
  4576       _par_scan_state->age_table()->add(obj, word_sz);
  4577     } else {
  4578       obj->set_mark(m);
  4581     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  4582     surv_young_words[young_index] += word_sz;
  4584     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4585       // We keep track of the next start index in the length field of
  4586       // the to-space object. The actual length can be found in the
  4587       // length field of the from-space object.
  4588       arrayOop(obj)->set_length(0);
  4589       oop* old_p = set_partial_array_mask(old);
  4590       _par_scan_state->push_on_queue(old_p);
  4591     } else {
  4592       // No point in using the slower heap_region_containing() method,
  4593       // given that we know obj is in the heap.
  4594       _scanner.set_region(_g1->heap_region_containing_raw(obj));
  4595       obj->oop_iterate_backwards(&_scanner);
  4597   } else {
  4598     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4599     obj = forward_ptr;
  4601   return obj;
  4604 template <class T>
  4605 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  4606   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
  4607     _scanned_klass->record_modified_oops();
  4611 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
  4612 template <class T>
  4613 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
  4614 ::do_oop_work(T* p) {
  4615   oop obj = oopDesc::load_decode_heap_oop(p);
  4616   assert(barrier != G1BarrierRS || obj != NULL,
  4617          "Precondition: G1BarrierRS implies obj is non-NULL");
  4619   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  4621   // Here the null check is implicit in the in_cset_fast_test() call
  4622   if (_g1->in_cset_fast_test(obj)) {
  4623     oop forwardee;
  4624     if (obj->is_forwarded()) {
  4625       forwardee = obj->forwardee();
  4626     } else {
  4627       forwardee = copy_to_survivor_space(obj);
  4629     assert(forwardee != NULL, "forwardee should not be NULL");
  4630     oopDesc::encode_store_heap_oop(p, forwardee);
  4631     if (do_mark_object && forwardee != obj) {
  4632       // If the object is self-forwarded we don't need to explicitly
  4633       // mark it; the evacuation failure protocol will do so.
  4634       mark_forwarded_object(obj, forwardee);
  4637     // When scanning the RS, we only care about objs in CS.
  4638     if (barrier == G1BarrierRS) {
  4639       _par_scan_state->update_rs(_from, p, _worker_id);
  4640     } else if (barrier == G1BarrierKlass) {
  4641       do_klass_barrier(p, forwardee);
  4643   } else {
  4644     // The object is not in collection set. If we're a root scanning
  4645     // closure during an initial mark pause (i.e. do_mark_object will
  4646     // be true) then attempt to mark the object.
  4647     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
  4648       mark_object(obj);
  4652   if (barrier == G1BarrierEvac && obj != NULL) {
  4653     _par_scan_state->update_rs(_from, p, _worker_id);
  4656   if (do_gen_barrier && obj != NULL) {
  4657     par_do_barrier(p);
  4661 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
  4662 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  4664 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  4665   assert(has_partial_array_mask(p), "invariant");
  4666   oop from_obj = clear_partial_array_mask(p);
  4668   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  4669   assert(from_obj->is_objArray(), "must be obj array");
  4670   objArrayOop from_obj_array = objArrayOop(from_obj);
  4671   // The from-space object contains the real length.
  4672   int length                 = from_obj_array->length();
  4674   assert(from_obj->is_forwarded(), "must be forwarded");
  4675   oop to_obj                 = from_obj->forwardee();
  4676   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  4677   objArrayOop to_obj_array   = objArrayOop(to_obj);
  4678   // We keep track of the next start index in the length field of the
  4679   // to-space object.
  4680   int next_index             = to_obj_array->length();
  4681   assert(0 <= next_index && next_index < length,
  4682          err_msg("invariant, next index: %d, length: %d", next_index, length));
  4684   int start                  = next_index;
  4685   int end                    = length;
  4686   int remainder              = end - start;
  4687   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  4688   if (remainder > 2 * ParGCArrayScanChunk) {
  4689     end = start + ParGCArrayScanChunk;
  4690     to_obj_array->set_length(end);
  4691     // Push the remainder before we process the range in case another
  4692     // worker has run out of things to do and can steal it.
  4693     oop* from_obj_p = set_partial_array_mask(from_obj);
  4694     _par_scan_state->push_on_queue(from_obj_p);
  4695   } else {
  4696     assert(length == end, "sanity");
  4697     // We'll process the final range for this object. Restore the length
  4698     // so that the heap remains parsable in case of evacuation failure.
  4699     to_obj_array->set_length(end);
  4701   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
  4702   // Process indexes [start,end). It will also process the header
  4703   // along with the first chunk (i.e., the chunk with start == 0).
  4704   // Note that at this point the length field of to_obj_array is not
  4705   // correct given that we are using it to keep track of the next
  4706   // start index. oop_iterate_range() (thankfully!) ignores the length
  4707   // field and only relies on the start / end parameters.  It does
  4708   // however return the size of the object which will be incorrect. So
  4709   // we have to ignore it even if we wanted to use it.
  4710   to_obj_array->oop_iterate_range(&_scanner, start, end);
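  // The chunking arithmetic above, isolated with plain ints (sketch only; the
  // struct and helper are hypothetical).
#if 0
  struct Chunk { int start; int end; bool republish_remainder; };
  static Chunk next_chunk(int next_index, int real_length, int chunk_size) {
    Chunk c;
    c.start = next_index;
    c.end   = real_length;
    // Only carve off a chunk if what's left is comfortably bigger than one
    // chunk; otherwise finish the array in this step.
    c.republish_remainder = (real_length - next_index) > 2 * chunk_size;
    if (c.republish_remainder) {
      c.end = next_index + chunk_size;  // becomes the new to-space length field
    }
    return c;
  }
#endif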
  4713 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4714 protected:
  4715   G1CollectedHeap*              _g1h;
  4716   G1ParScanThreadState*         _par_scan_state;
  4717   RefToScanQueueSet*            _queues;
  4718   ParallelTaskTerminator*       _terminator;
  4720   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4721   RefToScanQueueSet*      queues()         { return _queues; }
  4722   ParallelTaskTerminator* terminator()     { return _terminator; }
  4724 public:
  4725   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4726                                 G1ParScanThreadState* par_scan_state,
  4727                                 RefToScanQueueSet* queues,
  4728                                 ParallelTaskTerminator* terminator)
  4729     : _g1h(g1h), _par_scan_state(par_scan_state),
  4730       _queues(queues), _terminator(terminator) {}
  4732   void do_void();
  4734 private:
  4735   inline bool offer_termination();
  4736 };
  4738 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4739   G1ParScanThreadState* const pss = par_scan_state();
  4740   pss->start_term_time();
  4741   const bool res = terminator()->offer_termination();
  4742   pss->end_term_time();
  4743   return res;
  4746 void G1ParEvacuateFollowersClosure::do_void() {
  4747   StarTask stolen_task;
  4748   G1ParScanThreadState* const pss = par_scan_state();
  4749   pss->trim_queue();
  4751   do {
  4752     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  4753       assert(pss->verify_task(stolen_task), "sanity");
  4754       if (stolen_task.is_narrow()) {
  4755         pss->deal_with_reference((narrowOop*) stolen_task);
  4756       } else {
  4757         pss->deal_with_reference((oop*) stolen_task);
  4760       // We've just processed a reference and we might have made
  4761       // available new entries on the queues. So we have to make sure
  4762       // we drain the queues as necessary.
  4763       pss->trim_queue();
  4765   } while (!offer_termination());
  4767   pss->retire_alloc_buffers();
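  // The drain / steal / terminate shape used above, in outline (sketch only;
  // the helper names are hypothetical).
#if 0
  do {
    while (steal_one_task(&stolen)) {  // probe the other workers' queues
      process(stolen);
      drain_own_queues();              // stolen work may have fanned out
    }
  } while (!offer_termination());      // ParallelTaskTerminator protocol
#endif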
  4770 class G1KlassScanClosure : public KlassClosure {
  4771  G1ParCopyHelper* _closure;
  4772  bool             _process_only_dirty;
  4773  int              _count;
  4774  public:
  4775   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
  4776       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
  4777   void do_klass(Klass* klass) {
  4778     // If the klass has not been dirtied we know that there are
  4779     // no references into the young gen and we can skip it.
  4780    if (!_process_only_dirty || klass->has_modified_oops()) {
  4781       // Clean the klass since we're going to scavenge all the metadata.
  4782       klass->clear_modified_oops();
  4784       // Tell the closure that this klass is the Klass to scavenge
  4785       // and is the one to dirty if oops are left pointing into the young gen.
  4786       _closure->set_scanned_klass(klass);
  4788       klass->oops_do(_closure);
  4790       _closure->set_scanned_klass(NULL);
  4792     _count++;
  4794 };
  4796 class G1ParTask : public AbstractGangTask {
  4797 protected:
  4798   G1CollectedHeap*       _g1h;
  4799   RefToScanQueueSet      *_queues;
  4800   ParallelTaskTerminator _terminator;
  4801   uint _n_workers;
  4803   Mutex _stats_lock;
  4804   Mutex* stats_lock() { return &_stats_lock; }
  4806   size_t getNCards() {
  4807     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4808       / G1BlockOffsetSharedArray::N_bytes;
  4811 public:
  4812   G1ParTask(G1CollectedHeap* g1h,
  4813             RefToScanQueueSet *task_queues)
  4814     : AbstractGangTask("G1 collection"),
  4815       _g1h(g1h),
  4816       _queues(task_queues),
  4817       _terminator(0, _queues),
  4818       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  4819   {}
  4821   RefToScanQueueSet* queues() { return _queues; }
  4823   RefToScanQueue *work_queue(int i) {
  4824     return queues()->queue(i);
  4827   ParallelTaskTerminator* terminator() { return &_terminator; }
  4829   virtual void set_for_termination(int active_workers) {
  4830     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
  4831     // in the young space (_par_seq_tasks) in the G1 heap
  4832     // for SequentialSubTasksDone.
  4833     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
  4834     // both of which need setting by set_n_termination().
  4835     _g1h->SharedHeap::set_n_termination(active_workers);
  4836     _g1h->set_n_termination(active_workers);
  4837     terminator()->reset_for_reuse(active_workers);
  4838     _n_workers = active_workers;
  4841   void work(uint worker_id) {
  4842     if (worker_id >= _n_workers) return;  // no work needed this round
  4844     double start_time_ms = os::elapsedTime() * 1000.0;
  4845     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
  4848       ResourceMark rm;
  4849       HandleMark   hm;
  4851       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  4853       G1ParScanThreadState            pss(_g1h, worker_id);
  4854       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
  4855       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  4856       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
  4858       pss.set_evac_closure(&scan_evac_cl);
  4859       pss.set_evac_failure_closure(&evac_failure_cl);
  4860       pss.set_partial_scan_closure(&partial_scan_cl);
  4862       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
  4863       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
  4865       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
  4866       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
  4868       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
  4869       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
  4870       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
  4872       OopClosure*                    scan_root_cl = &only_scan_root_cl;
  4873       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
  4875       if (_g1h->g1_policy()->during_initial_mark_pause()) {
  4876         // We also need to mark copied objects.
  4877         scan_root_cl = &scan_mark_root_cl;
  4878         scan_klasses_cl = &scan_mark_klasses_cl_s;
  4881       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  4883       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
  4885       pss.start_strong_roots();
  4886       _g1h->g1_process_strong_roots(/* is scavenging */ true,
  4887                                     SharedHeap::ScanningOption(so),
  4888                                     scan_root_cl,
  4889                                     &push_heap_rs_cl,
  4890                                     scan_klasses_cl,
  4891                                     worker_id);
  4892       pss.end_strong_roots();
  4895         double start = os::elapsedTime();
  4896         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4897         evac.do_void();
  4898         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4899         double term_ms = pss.term_time()*1000.0;
  4900         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
  4901         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
  4903       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4904       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  4906       if (ParallelGCVerbose) {
  4907         MutexLocker x(stats_lock());
  4908         pss.print_termination_stats(worker_id);
  4911       assert(pss.refs()->is_empty(), "should be empty");
  4913       // Close the inner scope so that the ResourceMark and HandleMark
  4914       // destructors are executed here and are included as part of the
  4915       // "GC Worker Time".
  4918     double end_time_ms = os::elapsedTime() * 1000.0;
  4919     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
  4921 };
  4923 // *** Common G1 Evacuation Stuff
  4925 // Closures that support the filtering of CodeBlobs scanned during
  4926 // external root scanning.
  4928 // Closure applied to reference fields in code blobs (specifically nmethods)
  4929 // to determine whether an nmethod contains references that point into
  4930 // the collection set. Used as a predicate when walking code roots so
  4931 // that only nmethods that point into the collection set are added to the
  4932 // 'marked' list.
  4934 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
  4936   class G1PointsIntoCSOopClosure : public OopClosure {
  4937     G1CollectedHeap* _g1;
  4938     bool _points_into_cs;
  4939   public:
  4940     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
  4941       _g1(g1), _points_into_cs(false) { }
  4943     bool points_into_cs() const { return _points_into_cs; }
  4945     template <class T>
  4946     void do_oop_nv(T* p) {
  4947       if (!_points_into_cs) {
  4948         T heap_oop = oopDesc::load_heap_oop(p);
  4949         if (!oopDesc::is_null(heap_oop) &&
  4950             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
  4951           _points_into_cs = true;
  4956     virtual void do_oop(oop* p)        { do_oop_nv(p); }
  4957     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  4958   };
  4960   G1CollectedHeap* _g1;
  4962 public:
  4963   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
  4964     CodeBlobToOopClosure(cl, true), _g1(g1) { }
  4966   virtual void do_code_blob(CodeBlob* cb) {
  4967     nmethod* nm = cb->as_nmethod_or_null();
  4968     if (nm != NULL && !(nm->test_oops_do_mark())) {
  4969       G1PointsIntoCSOopClosure predicate_cl(_g1);
  4970       nm->oops_do(&predicate_cl);
  4972       if (predicate_cl.points_into_cs()) {
  4973         // At least one of the reference fields or the oop relocations
  4974         // in the nmethod points into the collection set. We have to
  4975         // 'mark' this nmethod.
  4976         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
  4977         // or MarkingCodeBlobClosure::do_code_blob() change.
  4978         if (!nm->test_set_oops_do_mark()) {
  4979           do_newly_marked_nmethod(nm);
  4984 };
  4986 // This method is run in a GC worker.
  4988 void
  4989 G1CollectedHeap::
  4990 g1_process_strong_roots(bool is_scavenging,
  4991                         ScanningOption so,
  4992                         OopClosure* scan_non_heap_roots,
  4993                         OopsInHeapRegionClosure* scan_rs,
  4994                         G1KlassScanClosure* scan_klasses,
  4995                         int worker_i) {
  4997   // First scan the strong roots
  4998   double ext_roots_start = os::elapsedTime();
  4999   double closure_app_time_sec = 0.0;
  5001   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  5003   // Walk the code cache w/o buffering, because StarTask cannot handle
  5004   // unaligned oop locations.
  5005   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
  5007   process_strong_roots(false, // no scoping; this is parallel code
  5008                        is_scavenging, so,
  5009                        &buf_scan_non_heap_roots,
  5010                        &eager_scan_code_roots,
  5011                        scan_klasses
  5012                        );
  5014   // Now the CM ref_processor roots.
  5015   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  5016     // We need to treat the discovered reference lists of the
  5017     // concurrent mark ref processor as roots and keep entries
  5018     // (which are added by the marking threads) on them live
  5019     // until they can be processed at the end of marking.
  5020     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  5023   // Finish up any enqueued closure apps (attributed as object copy time).
  5024   buf_scan_non_heap_roots.done();
  5026   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
  5028   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  5030   double ext_root_time_ms =
  5031     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  5033   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  5035   // During conc marking we have to filter the per-thread SATB buffers
  5036   // to make sure we remove any oops into the CSet (which will show up
  5037   // as implicitly live).
  5038   double satb_filtering_ms = 0.0;
  5039   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
  5040     if (mark_in_progress()) {
  5041       double satb_filter_start = os::elapsedTime();
  5043       JavaThread::satb_mark_queue_set().filter_thread_buffers();
  5045       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
  5048   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
  5050   // Now scan the complement of the collection set.
  5051   if (scan_rs != NULL) {
  5052     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  5054   _process_strong_tasks->all_tasks_completed();
  5057 void
  5058 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  5059                                        OopClosure* non_root_closure) {
  5060   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  5061   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  5064 // Weak Reference Processing support
  5066 // An always "is_alive" closure that is used to preserve referents.
  5067 // If the object is non-null then it's alive.  Used in the preservation
  5068 // of referent objects that are pointed to by reference objects
  5069 // discovered by the CM ref processor.
  5070 class G1AlwaysAliveClosure: public BoolObjectClosure {
  5071   G1CollectedHeap* _g1;
  5072 public:
  5073   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5074   void do_object(oop p) { assert(false, "Do not call."); }
  5075   bool do_object_b(oop p) {
  5076     if (p != NULL) {
  5077       return true;
  5079     return false;
  5081 };
  5083 bool G1STWIsAliveClosure::do_object_b(oop p) {
  5084   // An object is reachable if it is outside the collection set,
  5085   // or is inside and copied.
  5086   return !_g1->obj_in_cs(p) || p->is_forwarded();
  5089 // Non Copying Keep Alive closure
  5090 class G1KeepAliveClosure: public OopClosure {
  5091   G1CollectedHeap* _g1;
  5092 public:
  5093   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5094   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  5095   void do_oop(      oop* p) {
  5096     oop obj = *p;
  5098     if (_g1->obj_in_cs(obj)) {
  5099       assert( obj->is_forwarded(), "invariant" );
  5100       *p = obj->forwardee();
  5103 };
  5105 // Copying Keep Alive closure - can be called from both
  5106 // serial and parallel code as long as different worker
  5107 // threads utilize different G1ParScanThreadState instances
  5108 // and different queues.
  5110 class G1CopyingKeepAliveClosure: public OopClosure {
  5111   G1CollectedHeap*         _g1h;
  5112   OopClosure*              _copy_non_heap_obj_cl;
  5113   OopsInHeapRegionClosure* _copy_metadata_obj_cl;
  5114   G1ParScanThreadState*    _par_scan_state;
  5116 public:
  5117   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
  5118                             OopClosure* non_heap_obj_cl,
  5119                             OopsInHeapRegionClosure* metadata_obj_cl,
  5120                             G1ParScanThreadState* pss):
  5121     _g1h(g1h),
  5122     _copy_non_heap_obj_cl(non_heap_obj_cl),
  5123     _copy_metadata_obj_cl(metadata_obj_cl),
  5124     _par_scan_state(pss)
  5125   {}
  5127   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  5128   virtual void do_oop(      oop* p) { do_oop_work(p); }
  5130   template <class T> void do_oop_work(T* p) {
  5131     oop obj = oopDesc::load_decode_heap_oop(p);
  5133     if (_g1h->obj_in_cs(obj)) {
  5134       // If the referent object has been forwarded (either copied
  5135       // to a new location or to itself in the event of an
  5136       // evacuation failure) then we need to update the reference
  5137       // field and, if both reference and referent are in the G1
  5138       // heap, update the RSet for the referent.
  5139       //
  5140       // If the referent has not been forwarded then we have to keep
  5141       // it alive by policy. Therefore we have to copy the referent.
  5142       //
  5143       // If the reference field is in the G1 heap then we can push
  5144       // on the PSS queue. When the queue is drained (after each
  5145       // phase of reference processing) the object and its followers
  5146       // will be copied, the reference field set to point to the
  5147       // new location, and the RSet updated. Otherwise we need to
  5148       // use the non-heap or metadata closures directly to copy
  5149       // the referent object and update the pointer, while avoiding
  5150       // updating the RSet.
  5152       if (_g1h->is_in_g1_reserved(p)) {
  5153         _par_scan_state->push_on_queue(p);
  5154       } else {
  5155         assert(!ClassLoaderDataGraph::contains((address)p),
  5156                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
  5157                               PTR_FORMAT, p));
  5158           _copy_non_heap_obj_cl->do_oop(p);
  5162 };
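// The routing decision made in G1CopyingKeepAliveClosure::do_oop_work(), in
// outline (sketch only; the helper names are hypothetical).
#if 0
void route_referent_field(void* field_addr) {
  if (heap_contains(field_addr)) {
    // Reference field is in the G1 heap: defer to the PSS queue so the
    // referent is copied and the RSet updated when the queue is drained.
    push_on_queue(field_addr);
  } else {
    // Off-heap reference field: copy the referent directly, no RSet update.
    copy_non_heap(field_addr);
  }
}
#endif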
  5164 // Serial drain queue closure. Called as the 'complete_gc'
  5165 // closure for each discovered list in some of the
  5166 // reference processing phases.
  5168 class G1STWDrainQueueClosure: public VoidClosure {
  5169 protected:
  5170   G1CollectedHeap* _g1h;
  5171   G1ParScanThreadState* _par_scan_state;
  5173   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  5175 public:
  5176   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
  5177     _g1h(g1h),
  5178     _par_scan_state(pss)
  5179   { }
  5181   void do_void() {
  5182     G1ParScanThreadState* const pss = par_scan_state();
  5183     pss->trim_queue();
  5185 };
  5187 // Parallel Reference Processing closures
  5189 // Implementation of AbstractRefProcTaskExecutor for parallel reference
  5190 // processing during G1 evacuation pauses.
  5192 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  5193 private:
  5194   G1CollectedHeap*   _g1h;
  5195   RefToScanQueueSet* _queues;
  5196   FlexibleWorkGang*  _workers;
  5197   int                _active_workers;
  5199 public:
  5200   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
  5201                         FlexibleWorkGang* workers,
  5202                         RefToScanQueueSet *task_queues,
  5203                         int n_workers) :
  5204     _g1h(g1h),
  5205     _queues(task_queues),
  5206     _workers(workers),
  5207     _active_workers(n_workers)
  5209     assert(n_workers > 0, "shouldn't call this otherwise");
  5212   // Executes the given task using concurrent marking worker threads.
  5213   virtual void execute(ProcessTask& task);
  5214   virtual void execute(EnqueueTask& task);
  5215 };
  5217 // Gang task for possibly parallel reference processing
  5219 class G1STWRefProcTaskProxy: public AbstractGangTask {
  5220   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  5221   ProcessTask&     _proc_task;
  5222   G1CollectedHeap* _g1h;
  5223   RefToScanQueueSet *_task_queues;
  5224   ParallelTaskTerminator* _terminator;
  5226 public:
  5227   G1STWRefProcTaskProxy(ProcessTask& proc_task,
  5228                      G1CollectedHeap* g1h,
  5229                      RefToScanQueueSet *task_queues,
  5230                      ParallelTaskTerminator* terminator) :
  5231     AbstractGangTask("Process reference objects in parallel"),
  5232     _proc_task(proc_task),
  5233     _g1h(g1h),
  5234     _task_queues(task_queues),
  5235     _terminator(terminator)
  5236   {}
  5238   virtual void work(uint worker_id) {
  5239     // The reference processing task executed by a single worker.
  5240     ResourceMark rm;
  5241     HandleMark   hm;
  5243     G1STWIsAliveClosure is_alive(_g1h);
  5245     G1ParScanThreadState pss(_g1h, worker_id);
  5247     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
  5248     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5249     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
  5251     pss.set_evac_closure(&scan_evac_cl);
  5252     pss.set_evac_failure_closure(&evac_failure_cl);
  5253     pss.set_partial_scan_closure(&partial_scan_cl);
  5255     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5256     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
  5258     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5259     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
  5261     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5262     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
  5264     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5265       // We also need to mark copied objects.
  5266       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5267       copy_metadata_cl = &copy_mark_metadata_cl;
  5270     // Keep alive closure.
  5271     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
  5273     // Complete GC closure
  5274     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
  5276     // Call the reference processing task's work routine.
  5277     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
  5279     // Note we cannot assert that the refs array is empty here as not all
  5280     // of the processing tasks (specifically phase2 - pp2_work) execute
  5281     // the complete_gc closure (which ordinarily would drain the queue) so
  5282     // the queue may not be empty.
  5284 };
  5286 // Driver routine for parallel reference processing.
  5287 // Creates an instance of the ref processing gang
  5288 // task and has the worker threads execute it.
  5289 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  5290   assert(_workers != NULL, "Need parallel worker threads.");
  5292   ParallelTaskTerminator terminator(_active_workers, _queues);
  5293   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
  5295   _g1h->set_par_threads(_active_workers);
  5296   _workers->run_task(&proc_task_proxy);
  5297   _g1h->set_par_threads(0);
  5300 // Gang task for parallel reference enqueueing.
  5302 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
  5303   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  5304   EnqueueTask& _enq_task;
  5306 public:
  5307   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  5308     AbstractGangTask("Enqueue reference objects in parallel"),
  5309     _enq_task(enq_task)
  5310   { }
  5312   virtual void work(uint worker_id) {
  5313     _enq_task.work(worker_id);
  5315 };
  5317 // Driver routine for parallel reference enqueueing.
  5318 // Creates an instance of the ref enqueueing gang
  5319 // task and has the worker threads execute it.
  5321 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  5322   assert(_workers != NULL, "Need parallel worker threads.");
  5324   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
  5326   _g1h->set_par_threads(_active_workers);
  5327   _workers->run_task(&enq_task_proxy);
  5328   _g1h->set_par_threads(0);
  5331 // End of weak reference support closures
  5333 // Abstract task used to preserve (i.e. copy) any referent objects
  5334 // that are in the collection set and are pointed to by reference
  5335 // objects discovered by the CM ref processor.
  5337 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
  5338 protected:
  5339   G1CollectedHeap* _g1h;
  5340   RefToScanQueueSet      *_queues;
  5341   ParallelTaskTerminator _terminator;
  5342   uint _n_workers;
  5344 public:
  5345   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
  5346     AbstractGangTask("ParPreserveCMReferents"),
  5347     _g1h(g1h),
  5348     _queues(task_queues),
  5349     _terminator(workers, _queues),
  5350     _n_workers(workers)
  5351   { }
  5353   void work(uint worker_id) {
  5354     ResourceMark rm;
  5355     HandleMark   hm;
  5357     G1ParScanThreadState            pss(_g1h, worker_id);
  5358     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
  5359     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5360     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
  5362     pss.set_evac_closure(&scan_evac_cl);
  5363     pss.set_evac_failure_closure(&evac_failure_cl);
  5364     pss.set_partial_scan_closure(&partial_scan_cl);
  5366     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  5369     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5370     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
  5372     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5373     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
  5375     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5376     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
  5378     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5379       // We also need to mark copied objects.
  5380       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5381       copy_metadata_cl = &copy_mark_metadata_cl;
  5384     // Is alive closure
  5385     G1AlwaysAliveClosure always_alive(_g1h);
  5387     // Copying keep alive closure. Applied to referent objects that need
  5388     // to be copied.
  5389     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
  5391     ReferenceProcessor* rp = _g1h->ref_processor_cm();
  5393     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
  5394     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
  5396     // limit is set using max_num_q() - which was set using ParallelGCThreads.
  5397     // So this must be true - but assert just in case someone decides to
  5398     // change the worker ids.
  5399     assert(worker_id < limit, "sanity");
  5400     assert(!rp->discovery_is_atomic(), "check this code");
  5402     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
  5403     for (uint idx = worker_id; idx < limit; idx += stride) {
  5404       DiscoveredList& ref_list = rp->discovered_refs()[idx];
  5406       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
  5407       while (iter.has_next()) {
  5408         // Since discovery is not atomic for the CM ref processor, we
  5409         // can see some null referent objects.
  5410         iter.load_ptrs(DEBUG_ONLY(true));
  5411         oop ref = iter.obj();
  5413         // This will filter nulls.
  5414         if (iter.is_referent_alive()) {
  5415           iter.make_referent_alive();
  5417         iter.move_to_next();
  5421     // Drain the queue - which may cause stealing
  5422     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
  5423     drain_queue.do_void();
  5424     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
  5425     assert(pss.refs()->is_empty(), "should be");
  5427 };
  5429 // Weak Reference processing during an evacuation pause (part 1).
  5430 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
  5431   double ref_proc_start = os::elapsedTime();
  5433   ReferenceProcessor* rp = _ref_processor_stw;
  5434   assert(rp->discovery_enabled(), "should have been enabled");
  5436   // Any reference objects, in the collection set, that were 'discovered'
  5437   // by the CM ref processor should have already been copied (either by
  5438   // applying the external root copy closure to the discovered lists, or
  5439   // by following an RSet entry).
  5440   //
  5441   // But some of the referents, that are in the collection set, that these
  5442   // reference objects point to may not have been copied: the STW ref
  5443   // processor would have seen that the reference object had already
  5444   // been 'discovered' and would have skipped discovering the reference,
  5445   // but would not have treated the reference object as a regular oop.
  5446   // As a result the copy closure would not have been applied to the
  5447   // referent object.
  5448   //
  5449   // We need to explicitly copy these referent objects - the references
  5450   // will be processed at the end of remarking.
  5451   //
  5452   // We also need to do this copying before we process the reference
  5453   // objects discovered by the STW ref processor in case one of these
  5454   // referents points to another object which is also referenced by an
  5455   // object discovered by the STW ref processor.
  5457   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
  5458            no_of_gc_workers == workers()->active_workers(),
  5459            "Need to reset active GC workers");
  5461   set_par_threads(no_of_gc_workers);
  5462   G1ParPreserveCMReferentsTask keep_cm_referents(this,
  5463                                                  no_of_gc_workers,
  5464                                                  _task_queues);
  5466   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5467     workers()->run_task(&keep_cm_referents);
  5468   } else {
  5469     keep_cm_referents.work(0);
  5472   set_par_threads(0);
  5474   // Closure to test whether a referent is alive.
  5475   G1STWIsAliveClosure is_alive(this);
  5477   // Even when parallel reference processing is enabled, the processing
  5478   // of JNI refs is serial and performed serially by the current thread
  5479   // rather than by a worker. The following PSS will be used for processing
  5480   // JNI refs.
  5482   // Use only a single queue for this PSS.
  5483   G1ParScanThreadState pss(this, 0);
  5485   // We do not embed a reference processor in the copying/scanning
  5486   // closures while we're actually processing the discovered
  5487   // reference objects.
  5488   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
  5489   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
  5490   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
  5492   pss.set_evac_closure(&scan_evac_cl);
  5493   pss.set_evac_failure_closure(&evac_failure_cl);
  5494   pss.set_partial_scan_closure(&partial_scan_cl);
  5496   assert(pss.refs()->is_empty(), "pre-condition");
  5498   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
  5499   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
  5501   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  5502   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
  5504   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5505   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
  5507   if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5508     // We also need to mark copied objects.
  5509     copy_non_heap_cl = &copy_mark_non_heap_cl;
  5510     copy_metadata_cl = &copy_mark_metadata_cl;
  5513   // Keep alive closure.
  5514   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
  5516   // Serial Complete GC closure
  5517   G1STWDrainQueueClosure drain_queue(this, &pss);
  5519   // Set up the soft refs policy...
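         // (Passing false means soft references are treated according to the
         // current soft reference clearing policy rather than being cleared
         // unconditionally.)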
  5520   rp->setup_policy(false);
  5522   if (!rp->processing_is_mt()) {
  5523     // Serial reference processing...
  5524     rp->process_discovered_references(&is_alive,
  5525                                       &keep_alive,
  5526                                       &drain_queue,
  5527                                       NULL);
  5528   } else {
  5529     // Parallel reference processing
  5530     assert(rp->num_q() == no_of_gc_workers, "sanity");
  5531     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  5533     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  5534     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
  5537   // We have completed copying any necessary live referent objects
  5538   // (that were not copied during the actual pause) so we can
  5539   // retire any active alloc buffers
  5540   pss.retire_alloc_buffers();
  5541   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
  5543   double ref_proc_time = os::elapsedTime() - ref_proc_start;
  5544   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
  5547 // Weak Reference processing during an evacuation pause (part 2).
  5548 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
  5549   double ref_enq_start = os::elapsedTime();
  5551   ReferenceProcessor* rp = _ref_processor_stw;
  5552   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
  5554   // Now enqueue any remaining on the discovered lists on to
  5555   // the pending list.
  5556   if (!rp->processing_is_mt()) {
  5557     // Serial reference processing...
  5558     rp->enqueue_discovered_references();
  5559   } else {
  5560     // Parallel reference enqueueing
  5562     assert(no_of_gc_workers == workers()->active_workers(),
  5563            "Need to reset active workers");
  5564     assert(rp->num_q() == no_of_gc_workers, "sanity");
  5565     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  5567     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  5568     rp->enqueue_discovered_references(&par_task_executor);
  5571   rp->verify_no_references_recorded();
  5572   assert(!rp->discovery_enabled(), "should have been disabled");
  5574   // FIXME
  5575   // CM's reference processing also cleans up the string and symbol tables.
  5576   // Should we do that here also? We could, but it is a serial operation
  5577 // and could significantly increase the pause time.
  5579   double ref_enq_time = os::elapsedTime() - ref_enq_start;
  5580   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
  5583 void G1CollectedHeap::evacuate_collection_set() {
  5584   _expand_heap_after_alloc_failure = true;
  5585   set_evacuation_failed(false);
  5587   // Should G1EvacuationFailureALot be in effect for this GC?
  5588   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
  5590   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  5591   concurrent_g1_refine()->set_use_cache(false);
  5592   concurrent_g1_refine()->clear_hot_cache_claimed_index();
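         // Decide how many GC workers to use for this pause; with dynamic GC
         // threads this may be fewer than the total number of workers in the
         // work gang.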
  5594   uint n_workers;
  5595   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5596     n_workers =
  5597       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  5598                                      workers()->active_workers(),
  5599                                      Threads::number_of_non_daemon_threads());
  5600     assert(UseDynamicNumberOfGCThreads ||
  5601            n_workers == workers()->total_workers(),
  5602            "If not dynamic should be using all the workers");
  5603     workers()->set_active_workers(n_workers);
  5604     set_par_threads(n_workers);
  5605   } else {
  5606     assert(n_par_threads() == 0,
  5607            "Should be the original non-parallel value");
  5608     n_workers = 1;
  5611   G1ParTask g1_par_task(this, _task_queues);
  5613   init_for_evac_failure(NULL);
  5615   rem_set()->prepare_for_younger_refs_iterate(true);
  5617   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  5618   double start_par_time_sec = os::elapsedTime();
  5619   double end_par_time_sec;
  5622     StrongRootsScope srs(this);
  5624     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5625       // The individual threads will set their evac-failure closures.
  5626       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  5627       // These tasks use SharedHeap::_process_strong_tasks
  5628       assert(UseDynamicNumberOfGCThreads ||
  5629              workers()->active_workers() == workers()->total_workers(),
  5630              "If not dynamic should be using all the workers");
  5631       workers()->run_task(&g1_par_task);
  5632     } else {
  5633       g1_par_task.set_for_termination(n_workers);
  5634       g1_par_task.work(0);
  5636     end_par_time_sec = os::elapsedTime();
  5638     // Closing the inner scope will execute the destructor
  5639     // for the StrongRootsScope object. We record the current
  5640     // elapsed time before closing the scope so that time
  5641     // taken for the SRS destructor is NOT included in the
  5642     // reported parallel time.
  5645   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  5646   g1_policy()->phase_times()->record_par_time(par_time_ms);
  5648   double code_root_fixup_time_ms =
  5649         (os::elapsedTime() - end_par_time_sec) * 1000.0;
  5650   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
  5652   set_par_threads(0);
  5654   // Process any discovered reference objects - we have
  5655   // to do this _before_ we retire the GC alloc regions
  5656   // as we may have to copy some 'reachable' referent
  5657   // objects (and their reachable sub-graphs) that were
  5658   // not copied during the pause.
  5659   process_discovered_references(n_workers);
  5661   // Weak root processing.
  5662   // Note: when JSR 292 is enabled and code blobs can contain
  5663   // non-perm oops then we will need to process the code blobs
  5664   // here too.
  5666     G1STWIsAliveClosure is_alive(this);
  5667     G1KeepAliveClosure keep_alive(this);
  5668     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  5671   release_gc_alloc_regions(n_workers);
  5672   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  5674   concurrent_g1_refine()->clear_hot_cache();
  5675   concurrent_g1_refine()->set_use_cache(true);
  5677   finalize_for_evac_failure();
  5679   if (evacuation_failed()) {
  5680     remove_self_forwarding_pointers();
  5682     // Reset the G1EvacuationFailureALot counters and flags
  5683     // Note: the values are reset only when an actual
  5684     // evacuation failure occurs.
  5685     NOT_PRODUCT(reset_evacuation_should_fail();)
  5688   // Enqueue any references remaining on the STW
  5689   // reference processor's discovered lists. We need to do
  5690   // this after the card table is cleaned (and verified) as
  5691   // the act of enqueuing entries on to the pending list
  5692   // will log these updates (and dirty their associated
  5693   // cards). We need these updates logged to update any
  5694   // RSets.
  5695   enqueue_discovered_references(n_workers);
  5697   if (G1DeferredRSUpdate) {
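           // Re-dirty the cards for the updates that were deferred during the
           // pause and hand the completed buffers over to the global dirty
           // card queue set.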
  5698     RedirtyLoggedCardTableEntryFastClosure redirty;
  5699     dirty_card_queue_set().set_closure(&redirty);
  5700     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  5702     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  5703     dcq.merge_bufferlists(&dirty_card_queue_set());
  5704     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  5706   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  5709 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
  5710                                      size_t* pre_used,
  5711                                      FreeRegionList* free_list,
  5712                                      OldRegionSet* old_proxy_set,
  5713                                      HumongousRegionSet* humongous_proxy_set,
  5714                                      HRRSCleanupTask* hrrs_cleanup_task,
  5715                                      bool par) {
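         // Only free regions that are non-young, contain allocated data, and
         // have no live bytes after marking; otherwise just do the remembered
         // set cleanup work.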
  5716   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  5717     if (hr->isHumongous()) {
  5718       assert(hr->startsHumongous(), "we should only see starts humongous");
  5719       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
  5720     } else {
  5721       _old_set.remove_with_proxy(hr, old_proxy_set);
  5722       free_region(hr, pre_used, free_list, par);
  5724   } else {
  5725     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
  5729 void G1CollectedHeap::free_region(HeapRegion* hr,
  5730                                   size_t* pre_used,
  5731                                   FreeRegionList* free_list,
  5732                                   bool par) {
  5733   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  5734   assert(!hr->is_empty(), "the region should not be empty");
  5735   assert(free_list != NULL, "pre-condition");
  5737   *pre_used += hr->used();
  5738   hr->hr_clear(par, true /* clear_space */);
  5739   free_list->add_as_head(hr);
  5742 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  5743                                      size_t* pre_used,
  5744                                      FreeRegionList* free_list,
  5745                                      HumongousRegionSet* humongous_proxy_set,
  5746                                      bool par) {
  5747   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  5748   assert(free_list != NULL, "pre-condition");
  5749   assert(humongous_proxy_set != NULL, "pre-condition");
  5751   size_t hr_used = hr->used();
  5752   size_t hr_capacity = hr->capacity();
  5753   size_t hr_pre_used = 0;
  5754   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  5755   // We need to read this before we make the region non-humongous,
  5756   // otherwise the information will be gone.
  5757   uint last_index = hr->last_hc_index();
  5758   hr->set_notHumongous();
  5759   free_region(hr, &hr_pre_used, free_list, par);
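         // Also free the 'continues humongous' regions that trail the
         // 'starts humongous' region, clearing their humongous type first.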
  5761   uint i = hr->hrs_index() + 1;
  5762   while (i < last_index) {
  5763     HeapRegion* curr_hr = region_at(i);
  5764     assert(curr_hr->continuesHumongous(), "invariant");
  5765     curr_hr->set_notHumongous();
  5766     free_region(curr_hr, &hr_pre_used, free_list, par);
  5767     i += 1;
  5769   assert(hr_pre_used == hr_used,
  5770          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
  5771                  "should be the same", hr_pre_used, hr_used));
  5772   *pre_used += hr_pre_used;
  5775 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
  5776                                        FreeRegionList* free_list,
  5777                                        OldRegionSet* old_proxy_set,
  5778                                        HumongousRegionSet* humongous_proxy_set,
  5779                                        bool par) {
  5780   if (pre_used > 0) {
  5781     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
  5782     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  5783     assert(_summary_bytes_used >= pre_used,
  5784            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
  5785                    "should be >= pre_used: "SIZE_FORMAT,
  5786                    _summary_bytes_used, pre_used));
  5787     _summary_bytes_used -= pre_used;
  5789   if (free_list != NULL && !free_list->is_empty()) {
  5790     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  5791     _free_list.add_as_head(free_list);
  5793   if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
  5794     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  5795     _old_set.update_from_proxy(old_proxy_set);
  5797   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
  5798     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  5799     _humongous_set.update_from_proxy(humongous_proxy_set);
  5803 class G1ParCleanupCTTask : public AbstractGangTask {
  5804   CardTableModRefBS* _ct_bs;
  5805   G1CollectedHeap* _g1h;
  5806   HeapRegion* volatile _su_head;
  5807 public:
  5808   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  5809                      G1CollectedHeap* g1h) :
  5810     AbstractGangTask("G1 Par Cleanup CT Task"),
  5811     _ct_bs(ct_bs), _g1h(g1h) { }
  5813   void work(uint worker_id) {
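           // Workers repeatedly claim regions from the shared dirty cards
           // region list and clear their card table entries until the list
           // is drained.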
  5814     HeapRegion* r;
  5815     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  5816       clear_cards(r);
  5820   void clear_cards(HeapRegion* r) {
  5821     // Cards of the survivors should have already been dirtied.
  5822     if (!r->is_survivor()) {
  5823       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  5826 };
  5828 #ifndef PRODUCT
  5829 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  5830   G1CollectedHeap* _g1h;
  5831   CardTableModRefBS* _ct_bs;
  5832 public:
  5833   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
  5834     : _g1h(g1h), _ct_bs(ct_bs) { }
  5835   virtual bool doHeapRegion(HeapRegion* r) {
  5836     if (r->is_survivor()) {
  5837       _g1h->verify_dirty_region(r);
  5838     } else {
  5839       _g1h->verify_not_dirty_region(r);
  5841     return false;
  5843 };
  5845 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
  5846   // All of the region should be clean.
  5847   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
  5848   MemRegion mr(hr->bottom(), hr->end());
  5849   ct_bs->verify_not_dirty_region(mr);
  5852 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
  5853   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  5854   // dirty allocated blocks as they allocate them. The thread that
  5855   // retires each region and replaces it with a new one will do a
  5856   // maximal allocation to fill in [pre_dummy_top(),end()] but will
  5857   // not dirty that area (one less thing to have to do while holding
  5858   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  5859   // is dirty.
  5860   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
  5861   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  5862   ct_bs->verify_dirty_region(mr);
  5865 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
  5866   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
  5867   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
  5868     verify_dirty_region(hr);
  5872 void G1CollectedHeap::verify_dirty_young_regions() {
  5873   verify_dirty_young_list(_young_list->first_region());
  5875 #endif
  5877 void G1CollectedHeap::cleanUpCardTable() {
  5878   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  5879   double start = os::elapsedTime();
  5882     // Iterate over the dirty cards region list.
  5883     G1ParCleanupCTTask cleanup_task(ct_bs, this);
  5885     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5886       set_par_threads();
  5887       workers()->run_task(&cleanup_task);
  5888       set_par_threads(0);
  5889     } else {
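             // In the serial case walk the dirty cards region list directly.
             // The last region on the list points to itself, which is how the
             // end of the list is detected below.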
  5890       while (_dirty_cards_region_list) {
  5891         HeapRegion* r = _dirty_cards_region_list;
  5892         cleanup_task.clear_cards(r);
  5893         _dirty_cards_region_list = r->get_next_dirty_cards_region();
  5894         if (_dirty_cards_region_list == r) {
  5895           // The last region.
  5896           _dirty_cards_region_list = NULL;
  5898         r->set_next_dirty_cards_region(NULL);
  5901 #ifndef PRODUCT
  5902     if (G1VerifyCTCleanup || VerifyAfterGC) {
  5903       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
  5904       heap_region_iterate(&cleanup_verifier);
  5906 #endif
  5909   double elapsed = os::elapsedTime() - start;
  5910   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
  5913 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  5914   size_t pre_used = 0;
  5915   FreeRegionList local_free_list("Local List for CSet Freeing");
  5917   double young_time_ms     = 0.0;
  5918   double non_young_time_ms = 0.0;
  5920   // Since the collection set is a superset of the young list,
  5921   // all we need to do to clear the young list is clear its
  5922   // head and length, and unlink any young regions in the code below
  5923   _young_list->clear();
  5925   G1CollectorPolicy* policy = g1_policy();
  5927   double start_sec = os::elapsedTime();
  5928   bool non_young = true;
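         // Walk the collection set, attributing the elapsed time to either
         // the young or the non-young bucket and switching buckets whenever
         // the type of the current region changes.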
  5930   HeapRegion* cur = cs_head;
  5931   int age_bound = -1;
  5932   size_t rs_lengths = 0;
  5934   while (cur != NULL) {
  5935     assert(!is_on_master_free_list(cur), "sanity");
  5936     if (non_young) {
  5937       if (cur->is_young()) {
  5938         double end_sec = os::elapsedTime();
  5939         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5940         non_young_time_ms += elapsed_ms;
  5942         start_sec = os::elapsedTime();
  5943         non_young = false;
  5945     } else {
  5946       if (!cur->is_young()) {
  5947         double end_sec = os::elapsedTime();
  5948         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5949         young_time_ms += elapsed_ms;
  5951         start_sec = os::elapsedTime();
  5952         non_young = true;
  5956     rs_lengths += cur->rem_set()->occupied();
  5958     HeapRegion* next = cur->next_in_collection_set();
  5959     assert(cur->in_collection_set(), "bad CS");
  5960     cur->set_next_in_collection_set(NULL);
  5961     cur->set_in_collection_set(false);
  5963     if (cur->is_young()) {
  5964       int index = cur->young_index_in_cset();
  5965       assert(index != -1, "invariant");
  5966       assert((uint) index < policy->young_cset_region_length(), "invariant");
  5967       size_t words_survived = _surviving_young_words[index];
  5968       cur->record_surv_words_in_group(words_survived);
  5970       // At this point we have 'popped' cur from the collection set
  5971       // (linked via next_in_collection_set()) but it is still in the
  5972       // young list (linked via next_young_region()). Clear the
  5973       // _next_young_region field.
  5974       cur->set_next_young_region(NULL);
  5975     } else {
  5976       int index = cur->young_index_in_cset();
  5977       assert(index == -1, "invariant");
  5980     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  5981             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5982             "invariant" );
  5984     if (!cur->evacuation_failed()) {
  5985       MemRegion used_mr = cur->used_region();
  5987       // And the region is empty.
  5988       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
  5989       free_region(cur, &pre_used, &local_free_list, false /* par */);
  5990     } else {
  5991       cur->uninstall_surv_rate_group();
  5992       if (cur->is_young()) {
  5993         cur->set_young_index_in_cset(-1);
  5995       cur->set_not_young();
  5996       cur->set_evacuation_failed(false);
  5997       // The region is now considered to be old.
  5998       _old_set.add(cur);
  6000     cur = next;
  6003   policy->record_max_rs_lengths(rs_lengths);
  6004   policy->cset_regions_freed();
  6006   double end_sec = os::elapsedTime();
  6007   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6009   if (non_young) {
  6010     non_young_time_ms += elapsed_ms;
  6011   } else {
  6012     young_time_ms += elapsed_ms;
  6015   update_sets_after_freeing_regions(pre_used, &local_free_list,
  6016                                     NULL /* old_proxy_set */,
  6017                                     NULL /* humongous_proxy_set */,
  6018                                     false /* par */);
  6019   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
  6020   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
  6023 // This routine is similar to the above but does not record
  6024 // any policy statistics or update free lists; we are abandoning
  6025 // the current incremental collection set in preparation for a
  6026 // full collection. After the full GC we will start to build up
  6027 // the incremental collection set again.
  6028 // This is only called when we're doing a full collection
  6029 // and is immediately followed by the tearing down of the young list.
  6031 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  6032   HeapRegion* cur = cs_head;
  6034   while (cur != NULL) {
  6035     HeapRegion* next = cur->next_in_collection_set();
  6036     assert(cur->in_collection_set(), "bad CS");
  6037     cur->set_next_in_collection_set(NULL);
  6038     cur->set_in_collection_set(false);
  6039     cur->set_young_index_in_cset(-1);
  6040     cur = next;
  6044 void G1CollectedHeap::set_free_regions_coming() {
  6045   if (G1ConcRegionFreeingVerbose) {
  6046     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6047                            "setting free regions coming");
  6050   assert(!free_regions_coming(), "pre-condition");
  6051   _free_regions_coming = true;
  6054 void G1CollectedHeap::reset_free_regions_coming() {
  6055   assert(free_regions_coming(), "pre-condition");
  6058     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6059     _free_regions_coming = false;
  6060     SecondaryFreeList_lock->notify_all();
  6063   if (G1ConcRegionFreeingVerbose) {
  6064     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6065                            "reset free regions coming");
  6069 void G1CollectedHeap::wait_while_free_regions_coming() {
  6070   // Most of the time we won't have to wait, so let's do a quick test
  6071   // first before we take the lock.
  6072   if (!free_regions_coming()) {
  6073     return;
  6076   if (G1ConcRegionFreeingVerbose) {
  6077     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6078                            "waiting for free regions");
  6082     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6083     while (free_regions_coming()) {
  6084       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  6088   if (G1ConcRegionFreeingVerbose) {
  6089     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6090                            "done waiting for free regions");
  6094 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  6095   assert(heap_lock_held_for_gc(),
  6096               "the heap lock should already be held by or for this thread");
  6097   _young_list->push_region(hr);
  6100 class NoYoungRegionsClosure: public HeapRegionClosure {
  6101 private:
  6102   bool _success;
  6103 public:
  6104   NoYoungRegionsClosure() : _success(true) { }
  6105   bool doHeapRegion(HeapRegion* r) {
  6106     if (r->is_young()) {
  6107       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  6108                              r->bottom(), r->end());
  6109       _success = false;
  6111     return false;
  6113   bool success() { return _success; }
  6114 };
  6116 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  6117   bool ret = _young_list->check_list_empty(check_sample);
  6119   if (check_heap) {
  6120     NoYoungRegionsClosure closure;
  6121     heap_region_iterate(&closure);
  6122     ret = ret && closure.success();
  6125   return ret;
  6128 class TearDownRegionSetsClosure : public HeapRegionClosure {
  6129 private:
  6130   OldRegionSet *_old_set;
  6132 public:
  6133   TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
  6135   bool doHeapRegion(HeapRegion* r) {
  6136     if (r->is_empty()) {
  6137       // We ignore empty regions, we'll empty the free list afterwards
  6138     } else if (r->is_young()) {
  6139       // We ignore young regions, we'll empty the young list afterwards
  6140     } else if (r->isHumongous()) {
  6141       // We ignore humongous regions, we're not tearing down the
  6142       // humongous region set
  6143     } else {
  6144       // The rest should be old
  6145       _old_set->remove(r);
  6147     return false;
  6150   ~TearDownRegionSetsClosure() {
  6151     assert(_old_set->is_empty(), "post-condition");
  6153 };
  6155 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  6156   assert_at_safepoint(true /* should_be_vm_thread */);
  6158   if (!free_list_only) {
  6159     TearDownRegionSetsClosure cl(&_old_set);
  6160     heap_region_iterate(&cl);
  6162     // Need to do this after the heap iteration to be able to
  6163     // recognize the young regions and ignore them during the iteration.
  6164     _young_list->empty_list();
  6166   _free_list.remove_all();
  6169 class RebuildRegionSetsClosure : public HeapRegionClosure {
  6170 private:
  6171   bool            _free_list_only;
  6172   OldRegionSet*   _old_set;
  6173   FreeRegionList* _free_list;
  6174   size_t          _total_used;
  6176 public:
  6177   RebuildRegionSetsClosure(bool free_list_only,
  6178                            OldRegionSet* old_set, FreeRegionList* free_list) :
  6179     _free_list_only(free_list_only),
  6180     _old_set(old_set), _free_list(free_list), _total_used(0) {
  6181     assert(_free_list->is_empty(), "pre-condition");
  6182     if (!free_list_only) {
  6183       assert(_old_set->is_empty(), "pre-condition");
  6187   bool doHeapRegion(HeapRegion* r) {
  6188     if (r->continuesHumongous()) {
  6189       return false;
  6192     if (r->is_empty()) {
  6193       // Add free regions to the free list
  6194       _free_list->add_as_tail(r);
  6195     } else if (!_free_list_only) {
  6196       assert(!r->is_young(), "we should not come across young regions");
  6198       if (r->isHumongous()) {
  6199         // We ignore humongous regions, we left the humongous set unchanged
  6200       } else {
  6201         // The rest should be old, add them to the old set
  6202         _old_set->add(r);
  6204       _total_used += r->used();
  6207     return false;
  6210   size_t total_used() {
  6211     return _total_used;
  6213 };
  6215 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  6216   assert_at_safepoint(true /* should_be_vm_thread */);
  6218   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
  6219   heap_region_iterate(&cl);
  6221   if (!free_list_only) {
  6222     _summary_bytes_used = cl.total_used();
  6224   assert(_summary_bytes_used == recalculate_used(),
  6225          err_msg("inconsistent _summary_bytes_used, "
  6226                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
  6227                  _summary_bytes_used, recalculate_used()));
  6230 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  6231   _refine_cte_cl->set_concurrent(concurrent);
  6234 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  6235   HeapRegion* hr = heap_region_containing(p);
  6236   if (hr == NULL) {
  6237     return false;
  6238   } else {
  6239     return hr->is_in(p);
  6243 // Methods for the mutator alloc region
  6245 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
  6246                                                       bool force) {
  6247   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6248   assert(!force || g1_policy()->can_expand_young_list(),
  6249          "if force is true we should be able to expand the young list");
  6250   bool young_list_full = g1_policy()->is_young_list_full();
  6251   if (force || !young_list_full) {
  6252     HeapRegion* new_alloc_region = new_region(word_size,
  6253                                               false /* do_expand */);
  6254     if (new_alloc_region != NULL) {
  6255       set_region_short_lived_locked(new_alloc_region);
  6256       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
  6257       return new_alloc_region;
  6260   return NULL;
  6263 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  6264                                                   size_t allocated_bytes) {
  6265   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6266   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  6268   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  6269   _summary_bytes_used += allocated_bytes;
  6270   _hr_printer.retire(alloc_region);
  6271   // We update the eden sizes here, when the region is retired,
  6272   // instead of when it's allocated, since this is the point at which its
  6273   // used space has been recorded in _summary_bytes_used.
  6274   g1mm()->update_eden_size();
  6277 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  6278                                                     bool force) {
  6279   return _g1h->new_mutator_alloc_region(word_size, force);
  6282 void G1CollectedHeap::set_par_threads() {
  6283   // Don't change the number of workers.  Use the value previously set
  6284   // in the workgroup.
  6285   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
  6286   uint n_workers = workers()->active_workers();
  6287   assert(UseDynamicNumberOfGCThreads ||
  6288            n_workers == workers()->total_workers(),
  6289       "Otherwise should be using the total number of workers");
  6290   if (n_workers == 0) {
  6291     assert(false, "Should have been set in prior evacuation pause.");
  6292     n_workers = ParallelGCThreads;
  6293     workers()->set_active_workers(n_workers);
  6295   set_par_threads(n_workers);
  6298 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  6299                                        size_t allocated_bytes) {
  6300   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  6303 // Methods for the GC alloc regions
  6305 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
  6306                                                  uint count,
  6307                                                  GCAllocPurpose ap) {
  6308   assert(FreeList_lock->owned_by_self(), "pre-condition");
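         // Only allocate a new GC alloc region if the limit on such regions
         // (which appears to depend on the allocation purpose) has not been
         // reached yet.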
  6310   if (count < g1_policy()->max_regions(ap)) {
  6311     HeapRegion* new_alloc_region = new_region(word_size,
  6312                                               true /* do_expand */);
  6313     if (new_alloc_region != NULL) {
  6314       // We really only need to do this for old regions given that we
  6315       // should never scan survivors. But it doesn't hurt to do it
  6316       // for survivors too.
  6317       new_alloc_region->set_saved_mark();
  6318       if (ap == GCAllocForSurvived) {
  6319         new_alloc_region->set_survivor();
  6320         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
  6321       } else {
  6322         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
  6324       bool during_im = g1_policy()->during_initial_mark_pause();
  6325       new_alloc_region->note_start_of_copying(during_im);
  6326       return new_alloc_region;
  6327     } else {
  6328       g1_policy()->note_alloc_region_limit_reached(ap);
  6331   return NULL;
  6334 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  6335                                              size_t allocated_bytes,
  6336                                              GCAllocPurpose ap) {
  6337   bool during_im = g1_policy()->during_initial_mark_pause();
  6338   alloc_region->note_end_of_copying(during_im);
  6339   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  6340   if (ap == GCAllocForSurvived) {
  6341     young_list()->add_survivor_region(alloc_region);
  6342   } else {
  6343     _old_set.add(alloc_region);
  6345   _hr_printer.retire(alloc_region);
  6348 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
  6349                                                        bool force) {
  6350   assert(!force, "not supported for GC alloc regions");
  6351   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
  6354 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6355                                           size_t allocated_bytes) {
  6356   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6357                                GCAllocForSurvived);
  6360 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
  6361                                                   bool force) {
  6362   assert(!force, "not supported for GC alloc regions");
  6363   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
  6366 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6367                                      size_t allocated_bytes) {
  6368   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6369                                GCAllocForTenured);
  6371 // Heap region set verification
  6373 class VerifyRegionListsClosure : public HeapRegionClosure {
  6374 private:
  6375   FreeRegionList*     _free_list;
  6376   OldRegionSet*       _old_set;
  6377   HumongousRegionSet* _humongous_set;
  6378   uint                _region_count;
  6380 public:
  6381   VerifyRegionListsClosure(OldRegionSet* old_set,
  6382                            HumongousRegionSet* humongous_set,
  6383                            FreeRegionList* free_list) :
  6384     _old_set(old_set), _humongous_set(humongous_set),
  6385     _free_list(free_list), _region_count(0) { }
  6387   uint region_count() { return _region_count; }
  6389   bool doHeapRegion(HeapRegion* hr) {
  6390     _region_count += 1;
  6392     if (hr->continuesHumongous()) {
  6393       return false;
  6396     if (hr->is_young()) {
  6397       // TODO
  6398     } else if (hr->startsHumongous()) {
  6399       _humongous_set->verify_next_region(hr);
  6400     } else if (hr->is_empty()) {
  6401       _free_list->verify_next_region(hr);
  6402     } else {
  6403       _old_set->verify_next_region(hr);
  6405     return false;
  6407 };
  6409 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
  6410                                              HeapWord* bottom) {
  6411   HeapWord* end = bottom + HeapRegion::GrainWords;
  6412   MemRegion mr(bottom, end);
  6413   assert(_g1_reserved.contains(mr), "invariant");
  6414   // This might return NULL if the allocation fails
  6415   return new HeapRegion(hrs_index, _bot_shared, mr);
  6418 void G1CollectedHeap::verify_region_sets() {
  6419   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6421   // First, check the explicit lists.
  6422   _free_list.verify();
  6424     // Given that a concurrent operation might be adding regions to
  6425     // the secondary free list we have to take the lock before
  6426     // verifying it.
  6427     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6428     _secondary_free_list.verify();
  6430   _old_set.verify();
  6431   _humongous_set.verify();
  6433   // If a concurrent region freeing operation is in progress it will
  6434   // be difficult to correctly attribute any free regions we come
  6435   // across to the correct free list given that they might belong to
  6436   // one of several (free_list, secondary_free_list, any local lists,
  6437   // etc.). So, if that's the case we will skip the rest of the
  6438   // verification operation. Alternatively, waiting for the concurrent
  6439   // operation to complete will have a non-trivial effect on the GC's
  6440   // operation (no concurrent operation will last longer than the
  6441   // interval between two calls to verification) and it might hide
  6442   // any issues that we would like to catch during testing.
  6443   if (free_regions_coming()) {
  6444     return;
  6447   // Make sure we append the secondary_free_list on the free_list so
  6448   // that all free regions we will come across can be safely
  6449   // attributed to the free_list.
  6450   append_secondary_free_list_if_not_empty_with_lock();
  6452   // Finally, make sure that the region accounting in the lists is
  6453   // consistent with what we see in the heap.
  6454   _old_set.verify_start();
  6455   _humongous_set.verify_start();
  6456   _free_list.verify_start();
  6458   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
  6459   heap_region_iterate(&cl);
  6461   _old_set.verify_end();
  6462   _humongous_set.verify_end();
  6463   _free_list.verify_end();
