src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author       tschatzl
date         Mon, 21 Jul 2014 10:00:31 +0200
changeset    7018:a22acf6d7598
parent       6996:f3aeae1f9fc5
child        7019:755930f931e3
permissions  -rw-r--r--

8048112: G1 Full GC needs to support the case when the very first region is not available
Summary: Refactor preparation for compaction during Full GC so that it lazily initializes the first compaction point. This also avoids problems later when the first region may not be committed. Also reviewed by K. Barrett.
Reviewed-by: brutisso
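
The summary above says that Full GC preparation now initializes the first
compaction point lazily instead of assuming the very first heap region is
available. The sketch below only illustrates that general idea; it is not
code from this changeset, and the names LazyCompactionPoint and
prepare_for_compaction are hypothetical.

    #include <cstddef>                       // for NULL in this standalone sketch

    class HeapRegion;                        // opaque here; the real type lives in heapRegion.hpp

    struct LazyCompactionPoint {             // hypothetical stand-in for G1's compaction point
      HeapRegion* _current_region;
      LazyCompactionPoint() : _current_region(NULL) { }
      bool is_initialized() const { return _current_region != NULL; }
      void initialize(HeapRegion* hr)        { _current_region = hr; }
    };

    // Hypothetical helper, called once per region while preparing for compaction.
    void prepare_for_compaction(HeapRegion* hr, LazyCompactionPoint* cp) {
      if (!cp->is_initialized()) {
        // Seed the compaction point with the first region actually encountered,
        // so nothing ever relies on region 0 being committed.
        cp->initialize(hr);
      }
      // ... forwarding addresses for live objects in hr would be computed here ...
    }

With a lazy scheme like this, the compaction point is only ever seeded from a
region that is actually iterated, which is what allows the first region of the
heap to be uncommitted.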

     1 /*
     2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #if !defined(__clang_major__) && defined(__GNUC__)
    26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
    27 #endif
    29 #include "precompiled.hpp"
    30 #include "code/codeCache.hpp"
    31 #include "code/icBuffer.hpp"
    32 #include "gc_implementation/g1/bufferingOopClosure.hpp"
    33 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    34 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
    35 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    36 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    37 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    38 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    39 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    40 #include "gc_implementation/g1/g1EvacFailure.hpp"
    41 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
    42 #include "gc_implementation/g1/g1Log.hpp"
    43 #include "gc_implementation/g1/g1MarkSweep.hpp"
    44 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    45 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
    46 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    47 #include "gc_implementation/g1/g1StringDedup.hpp"
    48 #include "gc_implementation/g1/g1YCTypes.hpp"
    49 #include "gc_implementation/g1/heapRegion.inline.hpp"
    50 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    51 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    52 #include "gc_implementation/g1/vm_operations_g1.hpp"
    53 #include "gc_implementation/shared/gcHeapSummary.hpp"
    54 #include "gc_implementation/shared/gcTimer.hpp"
    55 #include "gc_implementation/shared/gcTrace.hpp"
    56 #include "gc_implementation/shared/gcTraceTime.hpp"
    57 #include "gc_implementation/shared/isGCActiveMark.hpp"
    58 #include "memory/allocation.hpp"
    59 #include "memory/gcLocker.inline.hpp"
    60 #include "memory/generationSpec.hpp"
    61 #include "memory/iterator.hpp"
    62 #include "memory/referenceProcessor.hpp"
    63 #include "oops/oop.inline.hpp"
    64 #include "oops/oop.pcgc.inline.hpp"
    65 #include "runtime/orderAccess.inline.hpp"
    66 #include "runtime/vmThread.hpp"
    68 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
    70 // turn it on so that the contents of the young list (scan-only /
    71 // to-be-collected) are printed at "strategic" points before / during
    72 // / after the collection --- this is useful for debugging
    73 #define YOUNG_LIST_VERBOSE 0
    74 // CURRENT STATUS
    75 // This file is under construction.  Search for "FIXME".
    77 // INVARIANTS/NOTES
    78 //
    79 // All allocation activity covered by the G1CollectedHeap interface is
    80 // serialized by acquiring the HeapLock.  This happens in mem_allocate
    81 // and allocate_new_tlab, which are the "entry" points to the
    82 // allocation code from the rest of the JVM.  (Note that this does not
    83 // apply to TLAB allocation, which is not part of this interface: it
    84 // is done by clients of this interface.)
    86 // Notes on implementation of parallelism in different tasks.
    87 //
    88 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
    89 // The number of GC workers is passed to heap_region_par_iterate_chunked().
    90 // It does use run_task() which sets _n_workers in the task.
    91 // G1ParTask executes g1_process_roots() ->
    92 // SharedHeap::process_roots() which calls eventually to
    93 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
    94 // SequentialSubTasksDone.  SharedHeap::process_roots() also
    95 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
    96 //
    98 // Local to this file.
   100 class RefineCardTableEntryClosure: public CardTableEntryClosure {
   101   bool _concurrent;
   102 public:
   103   RefineCardTableEntryClosure() : _concurrent(true) { }
   105   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
   106     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
   107     // This path is executed by the concurrent refine or mutator threads,
   108     // concurrently, and so we do not care if card_ptr contains references
   109     // that point into the collection set.
   110     assert(!oops_into_cset, "should be");
   112     if (_concurrent && SuspendibleThreadSet::should_yield()) {
   113       // Caller will actually yield.
   114       return false;
   115     }
   116     // Otherwise, we finished successfully; return true.
   117     return true;
   118   }
   120   void set_concurrent(bool b) { _concurrent = b; }
   121 };
   124 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
   125   size_t _num_processed;
   126   CardTableModRefBS* _ctbs;
   127   int _histo[256];
   129  public:
   130   ClearLoggedCardTableEntryClosure() :
   131     _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
   132   {
   133     for (int i = 0; i < 256; i++) _histo[i] = 0;
   134   }
   136   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
   137     unsigned char* ujb = (unsigned char*)card_ptr;
   138     int ind = (int)(*ujb);
   139     _histo[ind]++;
   141     *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
   142     _num_processed++;
   144     return true;
   145   }
   147   size_t num_processed() { return _num_processed; }
   149   void print_histo() {
   150     gclog_or_tty->print_cr("Card table value histogram:");
   151     for (int i = 0; i < 256; i++) {
   152       if (_histo[i] != 0) {
   153         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
   154       }
   155     }
   156   }
   157 };
   159 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
   160  private:
   161   size_t _num_processed;
   163  public:
   164   RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
   166   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
   167     *card_ptr = CardTableModRefBS::dirty_card_val();
   168     _num_processed++;
   169     return true;
   170   }
   172   size_t num_processed() const { return _num_processed; }
   173 };
   175 YoungList::YoungList(G1CollectedHeap* g1h) :
   176     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
   177     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
   178   guarantee(check_list_empty(false), "just making sure...");
   179 }
   181 void YoungList::push_region(HeapRegion *hr) {
   182   assert(!hr->is_young(), "should not already be young");
   183   assert(hr->get_next_young_region() == NULL, "cause it should!");
   185   hr->set_next_young_region(_head);
   186   _head = hr;
   188   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
   189   ++_length;
   190 }
   192 void YoungList::add_survivor_region(HeapRegion* hr) {
   193   assert(hr->is_survivor(), "should be flagged as survivor region");
   194   assert(hr->get_next_young_region() == NULL, "cause it should!");
   196   hr->set_next_young_region(_survivor_head);
   197   if (_survivor_head == NULL) {
   198     _survivor_tail = hr;
   199   }
   200   _survivor_head = hr;
   201   ++_survivor_length;
   202 }
   204 void YoungList::empty_list(HeapRegion* list) {
   205   while (list != NULL) {
   206     HeapRegion* next = list->get_next_young_region();
   207     list->set_next_young_region(NULL);
   208     list->uninstall_surv_rate_group();
   209     list->set_not_young();
   210     list = next;
   211   }
   212 }
   214 void YoungList::empty_list() {
   215   assert(check_list_well_formed(), "young list should be well formed");
   217   empty_list(_head);
   218   _head = NULL;
   219   _length = 0;
   221   empty_list(_survivor_head);
   222   _survivor_head = NULL;
   223   _survivor_tail = NULL;
   224   _survivor_length = 0;
   226   _last_sampled_rs_lengths = 0;
   228   assert(check_list_empty(false), "just making sure...");
   229 }
   231 bool YoungList::check_list_well_formed() {
   232   bool ret = true;
   234   uint length = 0;
   235   HeapRegion* curr = _head;
   236   HeapRegion* last = NULL;
   237   while (curr != NULL) {
   238     if (!curr->is_young()) {
   239       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   240                              "incorrectly tagged (y: %d, surv: %d)",
   241                              curr->bottom(), curr->end(),
   242                              curr->is_young(), curr->is_survivor());
   243       ret = false;
   244     }
   245     ++length;
   246     last = curr;
   247     curr = curr->get_next_young_region();
   248   }
   249   ret = ret && (length == _length);
   251   if (!ret) {
   252     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   253     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
   254                            length, _length);
   255   }
   257   return ret;
   258 }
   260 bool YoungList::check_list_empty(bool check_sample) {
   261   bool ret = true;
   263   if (_length != 0) {
   264     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
   265                   _length);
   266     ret = false;
   267   }
   268   if (check_sample && _last_sampled_rs_lengths != 0) {
   269     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   270     ret = false;
   271   }
   272   if (_head != NULL) {
   273     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   274     ret = false;
   275   }
   276   if (!ret) {
   277     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   278   }
   280   return ret;
   281 }
   283 void
   284 YoungList::rs_length_sampling_init() {
   285   _sampled_rs_lengths = 0;
   286   _curr               = _head;
   287 }
   289 bool
   290 YoungList::rs_length_sampling_more() {
   291   return _curr != NULL;
   292 }
   294 void
   295 YoungList::rs_length_sampling_next() {
   296   assert( _curr != NULL, "invariant" );
   297   size_t rs_length = _curr->rem_set()->occupied();
   299   _sampled_rs_lengths += rs_length;
   301   // The current region may not yet have been added to the
   302   // incremental collection set (it gets added when it is
   303   // retired as the current allocation region).
   304   if (_curr->in_collection_set()) {
   305     // Update the collection set policy information for this region
   306     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   307   }
   309   _curr = _curr->get_next_young_region();
   310   if (_curr == NULL) {
   311     _last_sampled_rs_lengths = _sampled_rs_lengths;
   312     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   313   }
   314 }
   316 void
   317 YoungList::reset_auxilary_lists() {
   318   guarantee( is_empty(), "young list should be empty" );
   319   assert(check_list_well_formed(), "young list should be well formed");
   321   // Add survivor regions to SurvRateGroup.
   322   _g1h->g1_policy()->note_start_adding_survivor_regions();
   323   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   325   int young_index_in_cset = 0;
   326   for (HeapRegion* curr = _survivor_head;
   327        curr != NULL;
   328        curr = curr->get_next_young_region()) {
   329     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
   331     // The region is a non-empty survivor so let's add it to
   332     // the incremental collection set for the next evacuation
   333     // pause.
   334     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   335     young_index_in_cset += 1;
   336   }
   337   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
   338   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   340   _head   = _survivor_head;
   341   _length = _survivor_length;
   342   if (_survivor_head != NULL) {
   343     assert(_survivor_tail != NULL, "cause it shouldn't be");
   344     assert(_survivor_length > 0, "invariant");
   345     _survivor_tail->set_next_young_region(NULL);
   346   }
   348   // Don't clear the survivor list handles until the start of
   349   // the next evacuation pause - we need it in order to re-tag
   350   // the survivor regions from this evacuation pause as 'young'
   351   // at the start of the next.
   353   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   355   assert(check_list_well_formed(), "young list should be well formed");
   356 }
   358 void YoungList::print() {
   359   HeapRegion* lists[] = {_head,   _survivor_head};
   360   const char* names[] = {"YOUNG", "SURVIVOR"};
   362   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   363     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   364     HeapRegion *curr = lists[list];
   365     if (curr == NULL)
   366       gclog_or_tty->print_cr("  empty");
   367     while (curr != NULL) {
   368       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
   369                              HR_FORMAT_PARAMS(curr),
   370                              curr->prev_top_at_mark_start(),
   371                              curr->next_top_at_mark_start(),
   372                              curr->age_in_surv_rate_group_cond());
   373       curr = curr->get_next_young_region();
   374     }
   375   }
   377   gclog_or_tty->cr();
   378 }
   380 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   381 {
   382   // Claim the right to put the region on the dirty cards region list
   383   // by installing a self pointer.
   384   HeapRegion* next = hr->get_next_dirty_cards_region();
   385   if (next == NULL) {
   386     HeapRegion* res = (HeapRegion*)
   387       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   388                           NULL);
   389     if (res == NULL) {
   390       HeapRegion* head;
   391       do {
   392         // Put the region to the dirty cards region list.
   393         head = _dirty_cards_region_list;
   394         next = (HeapRegion*)
   395           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   396         if (next == head) {
   397           assert(hr->get_next_dirty_cards_region() == hr,
   398                  "hr->get_next_dirty_cards_region() != hr");
   399           if (next == NULL) {
   400             // The last region in the list points to itself.
   401             hr->set_next_dirty_cards_region(hr);
   402           } else {
   403             hr->set_next_dirty_cards_region(next);
   404           }
   405         }
   406       } while (next != head);
   407     }
   408   }
   409 }
   411 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   412 {
   413   HeapRegion* head;
   414   HeapRegion* hr;
   415   do {
   416     head = _dirty_cards_region_list;
   417     if (head == NULL) {
   418       return NULL;
   419     }
   420     HeapRegion* new_head = head->get_next_dirty_cards_region();
   421     if (head == new_head) {
   422       // The last region.
   423       new_head = NULL;
   424     }
   425     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   426                                           head);
   427   } while (hr != head);
   428   assert(hr != NULL, "invariant");
   429   hr->set_next_dirty_cards_region(NULL);
   430   return hr;
   431 }
   433 #ifdef ASSERT
   434 // A region is added to the collection set as it is retired
   435 // so an address p can point to a region which will be in the
   436 // collection set but has not yet been retired.  This method
   437 // therefore is only accurate during a GC pause after all
   438 // regions have been retired.  It is used for debugging
   439 // to check if an nmethod has references to objects that can
   440 // be moved during a partial collection.  Though it can be
   441 // inaccurate, it is sufficient for G1 because the conservative
   442 // implementation of is_scavengable() for G1 will indicate that
   443 // all nmethods must be scanned during a partial collection.
   444 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
   445   HeapRegion* hr = heap_region_containing(p);
   446   return hr != NULL && hr->in_collection_set();
   447 }
   448 #endif
   450 // Returns true if the reference points to an object that
   451 // can move in an incremental collection.
   452 bool G1CollectedHeap::is_scavengable(const void* p) {
   453   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   454   G1CollectorPolicy* g1p = g1h->g1_policy();
   455   HeapRegion* hr = heap_region_containing(p);
   456   if (hr == NULL) {
   457      // null
   458      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
   459      return false;
   460   } else {
   461     return !hr->isHumongous();
   462   }
   463 }
   465 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   466   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   467   CardTableModRefBS* ct_bs = g1_barrier_set();
   469   // Count the dirty cards at the start.
   470   CountNonCleanMemRegionClosure count1(this);
   471   ct_bs->mod_card_iterate(&count1);
   472   int orig_count = count1.n();
   474   // First clear the logged cards.
   475   ClearLoggedCardTableEntryClosure clear;
   476   dcqs.apply_closure_to_all_completed_buffers(&clear);
   477   dcqs.iterate_closure_all_threads(&clear, false);
   478   clear.print_histo();
   480   // Now ensure that there are no dirty cards.
   481   CountNonCleanMemRegionClosure count2(this);
   482   ct_bs->mod_card_iterate(&count2);
   483   if (count2.n() != 0) {
   484     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   485                            count2.n(), orig_count);
   486   }
   487   guarantee(count2.n() == 0, "Card table should be clean.");
   489   RedirtyLoggedCardTableEntryClosure redirty;
   490   dcqs.apply_closure_to_all_completed_buffers(&redirty);
   491   dcqs.iterate_closure_all_threads(&redirty, false);
   492   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   493                          clear.num_processed(), orig_count);
   494   guarantee(redirty.num_processed() == clear.num_processed(),
   495             err_msg("Redirtied "SIZE_FORMAT" cards, but cleared "SIZE_FORMAT,
   496                     redirty.num_processed(), clear.num_processed()));
   498   CountNonCleanMemRegionClosure count3(this);
   499   ct_bs->mod_card_iterate(&count3);
   500   if (count3.n() != orig_count) {
   501     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   502                            orig_count, count3.n());
   503     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   504   }
   505 }
   507 // Private class members.
   509 G1CollectedHeap* G1CollectedHeap::_g1h;
   511 // Private methods.
   513 HeapRegion*
   514 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
   515   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   516   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
   517     if (!_secondary_free_list.is_empty()) {
   518       if (G1ConcRegionFreeingVerbose) {
   519         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   520                                "secondary_free_list has %u entries",
   521                                _secondary_free_list.length());
   522       }
   523       // It looks as if there are free regions available on the
   524       // secondary_free_list. Let's move them to the free_list and try
   525       // again to allocate from it.
   526       append_secondary_free_list();
   528       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
   529              "empty we should have moved at least one entry to the free_list");
   530       HeapRegion* res = _free_list.remove_region(is_old);
   531       if (G1ConcRegionFreeingVerbose) {
   532         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   533                                "allocated "HR_FORMAT" from secondary_free_list",
   534                                HR_FORMAT_PARAMS(res));
   535       }
   536       return res;
   537     }
   539     // Wait here until we get notified either when (a) there are no
   540     // more free regions coming or (b) some regions have been moved on
   541     // the secondary_free_list.
   542     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   543   }
   545   if (G1ConcRegionFreeingVerbose) {
   546     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   547                            "could not allocate from secondary_free_list");
   548   }
   549   return NULL;
   550 }
   552 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
   553   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
   554          "the only time we use this to allocate a humongous region is "
   555          "when we are allocating a single humongous region");
   557   HeapRegion* res;
   558   if (G1StressConcRegionFreeing) {
   559     if (!_secondary_free_list.is_empty()) {
   560       if (G1ConcRegionFreeingVerbose) {
   561         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   562                                "forced to look at the secondary_free_list");
   563       }
   564       res = new_region_try_secondary_free_list(is_old);
   565       if (res != NULL) {
   566         return res;
   567       }
   568     }
   569   }
   571   res = _free_list.remove_region(is_old);
   573   if (res == NULL) {
   574     if (G1ConcRegionFreeingVerbose) {
   575       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   576                              "res == NULL, trying the secondary_free_list");
   577     }
   578     res = new_region_try_secondary_free_list(is_old);
   579   }
   580   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
   581     // Currently, only attempts to allocate GC alloc regions set
   582     // do_expand to true. So, we should only reach here during a
   583     // safepoint. If this assumption changes we might have to
   584     // reconsider the use of _expand_heap_after_alloc_failure.
   585     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   587     ergo_verbose1(ErgoHeapSizing,
   588                   "attempt heap expansion",
   589                   ergo_format_reason("region allocation request failed")
   590                   ergo_format_byte("allocation request"),
   591                   word_size * HeapWordSize);
   592     if (expand(word_size * HeapWordSize)) {
   593       // Given that expand() succeeded in expanding the heap, and we
   594       // always expand the heap by an amount aligned to the heap
   595       // region size, the free list should in theory not be empty.
   596       // In either case remove_region() will check for NULL.
   597       res = _free_list.remove_region(is_old);
   598     } else {
   599       _expand_heap_after_alloc_failure = false;
   600     }
   601   }
   602   return res;
   603 }
   605 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
   606                                                         size_t word_size) {
   607   assert(isHumongous(word_size), "word_size should be humongous");
   608   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   610   uint first = G1_NULL_HRS_INDEX;
   611   if (num_regions == 1) {
   612     // Only one region to allocate, no need to go through the slower
   613     // path. The caller will attempt the expansion if this fails, so
   614     // let's not try to expand here too.
   615     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
   616     if (hr != NULL) {
   617       first = hr->hrs_index();
   618     } else {
   619       first = G1_NULL_HRS_INDEX;
   620     }
   621   } else {
   622     // We can't allocate humongous regions while cleanupComplete() is
   623     // running, since some of the regions we find to be empty might not
   624     // yet be added to the free list and it is not straightforward to
   625     // know which list they are on so that we can remove them. Note
   626     // that we only need to do this if we need to allocate more than
   627     // one region to satisfy the current humongous allocation
   628     // request. If we are only allocating one region we use the common
   629     // region allocation code (see above).
   630     wait_while_free_regions_coming();
   631     append_secondary_free_list_if_not_empty_with_lock();
   633     if (free_regions() >= num_regions) {
   634       first = _hrs.find_contiguous(num_regions);
   635       if (first != G1_NULL_HRS_INDEX) {
   636         for (uint i = first; i < first + num_regions; ++i) {
   637           HeapRegion* hr = region_at(i);
   638           assert(hr->is_empty(), "sanity");
   639           assert(is_on_master_free_list(hr), "sanity");
   640           hr->set_pending_removal(true);
   641         }
   642         _free_list.remove_all_pending(num_regions);
   643       }
   644     }
   645   }
   646   return first;
   647 }
   649 HeapWord*
   650 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   651                                                            uint num_regions,
   652                                                            size_t word_size) {
   653   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   654   assert(isHumongous(word_size), "word_size should be humongous");
   655   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   657   // Index of last region in the series + 1.
   658   uint last = first + num_regions;
   660   // We need to initialize the region(s) we just discovered. This is
   661   // a bit tricky given that it can happen concurrently with
   662   // refinement threads refining cards on these regions and
   663   // potentially wanting to refine the BOT as they are scanning
   664   // those cards (this can happen shortly after a cleanup; see CR
   665   // 6991377). So we have to set up the region(s) carefully and in
   666   // a specific order.
   668   // The word size sum of all the regions we will allocate.
   669   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
   670   assert(word_size <= word_size_sum, "sanity");
   672   // This will be the "starts humongous" region.
   673   HeapRegion* first_hr = region_at(first);
   674   // The header of the new object will be placed at the bottom of
   675   // the first region.
   676   HeapWord* new_obj = first_hr->bottom();
   677   // This will be the new end of the first region in the series that
   678   // should also match the end of the last region in the series.
   679   HeapWord* new_end = new_obj + word_size_sum;
   680   // This will be the new top of the first region that will reflect
   681   // this allocation.
   682   HeapWord* new_top = new_obj + word_size;
   684   // First, we need to zero the header of the space that we will be
   685   // allocating. When we update top further down, some refinement
   686   // threads might try to scan the region. By zeroing the header we
   687   // ensure that any thread that will try to scan the region will
   688   // come across the zero klass word and bail out.
   689   //
   690   // NOTE: It would not have been correct to have used
   691   // CollectedHeap::fill_with_object() and make the space look like
   692   // an int array. The thread that is doing the allocation will
   693   // later update the object header to a potentially different array
   694   // type and, for a very short period of time, the klass and length
   695   // fields will be inconsistent. This could cause a refinement
   696   // thread to calculate the object size incorrectly.
   697   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   699   // We will set up the first region as "starts humongous". This
   700   // will also update the BOT covering all the regions to reflect
   701   // that there is a single object that starts at the bottom of the
   702   // first region.
   703   first_hr->set_startsHumongous(new_top, new_end);
   705   // Then, if there are any, we will set up the "continues
   706   // humongous" regions.
   707   HeapRegion* hr = NULL;
   708   for (uint i = first + 1; i < last; ++i) {
   709     hr = region_at(i);
   710     hr->set_continuesHumongous(first_hr);
   711   }
   712   // If we have "continues humongous" regions (hr != NULL), then the
   713   // end of the last one should match new_end.
   714   assert(hr == NULL || hr->end() == new_end, "sanity");
   716   // Up to this point no concurrent thread would have been able to
   717   // do any scanning on any region in this series. All the top
   718   // fields still point to bottom, so the intersection between
   719   // [bottom,top] and [card_start,card_end] will be empty. Before we
   720   // update the top fields, we'll do a storestore to make sure that
   721   // no thread sees the update to top before the zeroing of the
   722   // object header and the BOT initialization.
   723   OrderAccess::storestore();
   725   // Now that the BOT and the object header have been initialized,
   726   // we can update top of the "starts humongous" region.
   727   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   728          "new_top should be in this region");
   729   first_hr->set_top(new_top);
   730   if (_hr_printer.is_active()) {
   731     HeapWord* bottom = first_hr->bottom();
   732     HeapWord* end = first_hr->orig_end();
   733     if ((first + 1) == last) {
   734       // the series has a single humongous region
   735       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
   736     } else {
   737       // the series has more than one humongous region
   738       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
   739     }
   740   }
   742   // Now, we will update the top fields of the "continues humongous"
   743   // regions. The reason we need to do this is that, otherwise,
   744   // these regions would look empty and this will confuse parts of
   745   // G1. For example, the code that looks for a consecutive number
   746   // of empty regions will consider them empty and try to
   747   // re-allocate them. We can extend is_empty() to also include
   748   // !continuesHumongous(), but it is easier to just update the top
   749   // fields here. The way we set top for all regions (i.e., top ==
   750   // end for all regions but the last one, top == new_top for the
   751   // last one) is actually used when we will free up the humongous
   752   // region in free_humongous_region().
   753   hr = NULL;
   754   for (uint i = first + 1; i < last; ++i) {
   755     hr = region_at(i);
   756     if ((i + 1) == last) {
   757       // last continues humongous region
   758       assert(hr->bottom() < new_top && new_top <= hr->end(),
   759              "new_top should fall on this region");
   760       hr->set_top(new_top);
   761       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
   762     } else {
   763       // not last one
   764       assert(new_top > hr->end(), "new_top should be above this region");
   765       hr->set_top(hr->end());
   766       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
   767     }
   768   }
   769   // If we have continues humongous regions (hr != NULL), then the
   770   // end of the last one should match new_end and its top should
   771   // match new_top.
   772   assert(hr == NULL ||
   773          (hr->end() == new_end && hr->top() == new_top), "sanity");
   775   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
   776   _summary_bytes_used += first_hr->used();
   777   _humongous_set.add(first_hr);
   779   return new_obj;
   780 }
   782 // If could fit into free regions w/o expansion, try.
   783 // Otherwise, if can expand, do so.
   784 // Otherwise, if using ex regions might help, try with ex given back.
   785 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   786   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   788   verify_region_sets_optional();
   790   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
   791   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
   792   uint x_num = expansion_regions();
   793   uint fs = _hrs.free_suffix();
   794   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
   795   if (first == G1_NULL_HRS_INDEX) {
   796     // The only thing we can do now is attempt expansion.
   797     if (fs + x_num >= num_regions) {
   798       // If the number of regions we're trying to allocate for this
   799       // object is at most the number of regions in the free suffix,
   800       // then the call to humongous_obj_allocate_find_first() above
   801       // should have succeeded and we wouldn't be here.
   802       //
   803       // We should only be trying to expand when the free suffix is
   804       // not sufficient for the object _and_ we have some expansion
   805       // room available.
   806       assert(num_regions > fs, "earlier allocation should have succeeded");
   808       ergo_verbose1(ErgoHeapSizing,
   809                     "attempt heap expansion",
   810                     ergo_format_reason("humongous allocation request failed")
   811                     ergo_format_byte("allocation request"),
   812                     word_size * HeapWordSize);
   813       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
   814         // Even though the heap was expanded, it might not have
   815         // reached the desired size. So, we cannot assume that the
   816         // allocation will succeed.
   817         first = humongous_obj_allocate_find_first(num_regions, word_size);
   818       }
   819     }
   820   }
   822   HeapWord* result = NULL;
   823   if (first != G1_NULL_HRS_INDEX) {
   824     result =
   825       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
   826     assert(result != NULL, "it should always return a valid result");
   828     // A successful humongous object allocation changes the used space
   829     // information of the old generation so we need to recalculate the
   830     // sizes and update the jstat counters here.
   831     g1mm()->update_sizes();
   832   }
   834   verify_region_sets_optional();
   836   return result;
   837 }
   839 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   840   assert_heap_not_locked_and_not_at_safepoint();
   841   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
   843   unsigned int dummy_gc_count_before;
   844   int dummy_gclocker_retry_count = 0;
   845   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
   846 }
   848 HeapWord*
   849 G1CollectedHeap::mem_allocate(size_t word_size,
   850                               bool*  gc_overhead_limit_was_exceeded) {
   851   assert_heap_not_locked_and_not_at_safepoint();
   853   // Loop until the allocation is satisfied, or unsatisfied after GC.
   854   for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
   855     unsigned int gc_count_before;
   857     HeapWord* result = NULL;
   858     if (!isHumongous(word_size)) {
   859       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
   860     } else {
   861       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
   862     }
   863     if (result != NULL) {
   864       return result;
   865     }
   867     // Create the garbage collection operation...
   868     VM_G1CollectForAllocation op(gc_count_before, word_size);
   869     // ...and get the VM thread to execute it.
   870     VMThread::execute(&op);
   872     if (op.prologue_succeeded() && op.pause_succeeded()) {
   873       // If the operation was successful we'll return the result even
   874       // if it is NULL. If the allocation attempt failed immediately
   875       // after a Full GC, it's unlikely we'll be able to allocate now.
   876       HeapWord* result = op.result();
   877       if (result != NULL && !isHumongous(word_size)) {
   878         // Allocations that take place on VM operations do not do any
   879         // card dirtying and we have to do it here. We only have to do
   880         // this for non-humongous allocations, though.
   881         dirty_young_block(result, word_size);
   882       }
   883       return result;
   884     } else {
   885       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
   886         return NULL;
   887       }
   888       assert(op.result() == NULL,
   889              "the result should be NULL if the VM op did not succeed");
   890     }
   892     // Give a warning if we seem to be looping forever.
   893     if ((QueuedAllocationWarningCount > 0) &&
   894         (try_count % QueuedAllocationWarningCount == 0)) {
   895       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
   896     }
   897   }
   899   ShouldNotReachHere();
   900   return NULL;
   901 }
   903 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   904                                            unsigned int *gc_count_before_ret,
   905                                            int* gclocker_retry_count_ret) {
   906   // Make sure you read the note in attempt_allocation_humongous().
   908   assert_heap_not_locked_and_not_at_safepoint();
   909   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
   910          "be called for humongous allocation requests");
   912   // We should only get here after the first-level allocation attempt
   913   // (attempt_allocation()) failed to allocate.
   915   // We will loop until a) we manage to successfully perform the
   916   // allocation or b) we successfully schedule a collection which
   917   // fails to perform the allocation. b) is the only case when we'll
   918   // return NULL.
   919   HeapWord* result = NULL;
   920   for (int try_count = 1; /* we'll return */; try_count += 1) {
   921     bool should_try_gc;
   922     unsigned int gc_count_before;
   924     {
   925       MutexLockerEx x(Heap_lock);
   927       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
   928                                                       false /* bot_updates */);
   929       if (result != NULL) {
   930         return result;
   931       }
   933       // If we reach here, attempt_allocation_locked() above failed to
   934       // allocate a new region. So the mutator alloc region should be NULL.
   935       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
   937       if (GC_locker::is_active_and_needs_gc()) {
   938         if (g1_policy()->can_expand_young_list()) {
   939           // No need for an ergo verbose message here,
   940           // can_expand_young_list() does this when it returns true.
   941           result = _mutator_alloc_region.attempt_allocation_force(word_size,
   942                                                       false /* bot_updates */);
   943           if (result != NULL) {
   944             return result;
   945           }
   946         }
   947         should_try_gc = false;
   948       } else {
   949         // The GCLocker may not be active but the GCLocker initiated
   950         // GC may not yet have been performed (GCLocker::needs_gc()
   951         // returns true). In this case we do not try this GC and
   952         // wait until the GCLocker initiated GC is performed, and
   953         // then retry the allocation.
   954         if (GC_locker::needs_gc()) {
   955           should_try_gc = false;
   956         } else {
   957           // Read the GC count while still holding the Heap_lock.
   958           gc_count_before = total_collections();
   959           should_try_gc = true;
   960         }
   961       }
   962     }
   964     if (should_try_gc) {
   965       bool succeeded;
   966       result = do_collection_pause(word_size, gc_count_before, &succeeded,
   967           GCCause::_g1_inc_collection_pause);
   968       if (result != NULL) {
   969         assert(succeeded, "only way to get back a non-NULL result");
   970         return result;
   971       }
   973       if (succeeded) {
   974         // If we get here we successfully scheduled a collection which
   975         // failed to allocate. No point in trying to allocate
   976         // further. We'll just return NULL.
   977         MutexLockerEx x(Heap_lock);
   978         *gc_count_before_ret = total_collections();
   979         return NULL;
   980       }
   981     } else {
   982       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
   983         MutexLockerEx x(Heap_lock);
   984         *gc_count_before_ret = total_collections();
   985         return NULL;
   986       }
   987       // The GCLocker is either active or the GCLocker initiated
   988       // GC has not yet been performed. Stall until it is and
   989       // then retry the allocation.
   990       GC_locker::stall_until_clear();
   991       (*gclocker_retry_count_ret) += 1;
   992     }
   994     // We can reach here if we were unsuccessful in scheduling a
   995     // collection (because another thread beat us to it) or if we were
   996     // stalled due to the GC locker. In either case we should retry the
   997     // allocation attempt in case another thread successfully
   998     // performed a collection and reclaimed enough space. We do the
   999     // first attempt (without holding the Heap_lock) here and the
  1000     // follow-on attempt will be at the start of the next loop
  1001     // iteration (after taking the Heap_lock).
  1002     result = _mutator_alloc_region.attempt_allocation(word_size,
  1003                                                       false /* bot_updates */);
  1004     if (result != NULL) {
  1005       return result;
  1006     }
  1008     // Give a warning if we seem to be looping forever.
  1009     if ((QueuedAllocationWarningCount > 0) &&
  1010         (try_count % QueuedAllocationWarningCount == 0)) {
  1011       warning("G1CollectedHeap::attempt_allocation_slow() "
  1012               "retries %d times", try_count);
  1013     }
  1014   }
  1016   ShouldNotReachHere();
  1017   return NULL;
  1018 }
  1020 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
  1021                                           unsigned int * gc_count_before_ret,
  1022                                           int* gclocker_retry_count_ret) {
  1023   // The structure of this method has a lot of similarities to
  1024   // attempt_allocation_slow(). The reason these two were not merged
  1025   // into a single one is that such a method would require several "if
  1026   // allocation is not humongous do this, otherwise do that"
  1027   // conditional paths which would obscure its flow. In fact, an early
  1028   // version of this code did use a unified method which was harder to
  1029   // follow and, as a result, it had subtle bugs that were hard to
  1030   // track down. So keeping these two methods separate allows each to
  1031   // be more readable. It will be good to keep these two in sync as
  1032   // much as possible.
  1034   assert_heap_not_locked_and_not_at_safepoint();
  1035   assert(isHumongous(word_size), "attempt_allocation_humongous() "
  1036          "should only be called for humongous allocations");
  1038   // Humongous objects can exhaust the heap quickly, so we should check if we
  1039   // need to start a marking cycle at each humongous object allocation. We do
  1040   // the check before we do the actual allocation. The reason for doing it
  1041   // before the allocation is that we avoid having to keep track of the newly
  1042   // allocated memory while we do a GC.
  1043   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
  1044                                            word_size)) {
  1045     collect(GCCause::_g1_humongous_allocation);
  1046   }
  1048   // We will loop until a) we manage to successfully perform the
  1049   // allocation or b) we successfully schedule a collection which
  1050   // fails to perform the allocation. b) is the only case when we'll
  1051   // return NULL.
  1052   HeapWord* result = NULL;
  1053   for (int try_count = 1; /* we'll return */; try_count += 1) {
  1054     bool should_try_gc;
  1055     unsigned int gc_count_before;
  1057     {
  1058       MutexLockerEx x(Heap_lock);
  1060       // Given that humongous objects are not allocated in young
  1061       // regions, we'll first try to do the allocation without doing a
  1062       // collection hoping that there's enough space in the heap.
  1063       result = humongous_obj_allocate(word_size);
  1064       if (result != NULL) {
  1065         return result;
  1066       }
  1068       if (GC_locker::is_active_and_needs_gc()) {
  1069         should_try_gc = false;
  1070       } else {
  1071          // The GCLocker may not be active but the GCLocker initiated
  1072         // GC may not yet have been performed (GCLocker::needs_gc()
  1073         // returns true). In this case we do not try this GC and
  1074         // wait until the GCLocker initiated GC is performed, and
  1075         // then retry the allocation.
  1076         if (GC_locker::needs_gc()) {
  1077           should_try_gc = false;
  1078         } else {
  1079           // Read the GC count while still holding the Heap_lock.
  1080           gc_count_before = total_collections();
  1081           should_try_gc = true;
  1082         }
  1083       }
  1084     }
  1086     if (should_try_gc) {
  1087       // If we failed to allocate the humongous object, we should try to
  1088       // do a collection pause (if we're allowed) in case it reclaims
  1089       // enough space for the allocation to succeed after the pause.
  1091       bool succeeded;
  1092       result = do_collection_pause(word_size, gc_count_before, &succeeded,
  1093           GCCause::_g1_humongous_allocation);
  1094       if (result != NULL) {
  1095         assert(succeeded, "only way to get back a non-NULL result");
  1096         return result;
  1097       }
  1099       if (succeeded) {
  1100         // If we get here we successfully scheduled a collection which
  1101         // failed to allocate. No point in trying to allocate
  1102         // further. We'll just return NULL.
  1103         MutexLockerEx x(Heap_lock);
  1104         *gc_count_before_ret = total_collections();
  1105         return NULL;
  1106       }
  1107     } else {
  1108       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
  1109         MutexLockerEx x(Heap_lock);
  1110         *gc_count_before_ret = total_collections();
  1111         return NULL;
  1112       }
  1113       // The GCLocker is either active or the GCLocker initiated
  1114       // GC has not yet been performed. Stall until it is and
  1115       // then retry the allocation.
  1116       GC_locker::stall_until_clear();
  1117       (*gclocker_retry_count_ret) += 1;
  1118     }
  1120     // We can reach here if we were unsuccessful in scheduling a
  1121     // collection (because another thread beat us to it) or if we were
  1122     // stalled due to the GC locker. In either case we should retry the
  1123     // allocation attempt in case another thread successfully
  1124     // performed a collection and reclaimed enough space.  Give a
  1125     // warning if we seem to be looping forever.
  1127     if ((QueuedAllocationWarningCount > 0) &&
  1128         (try_count % QueuedAllocationWarningCount == 0)) {
  1129       warning("G1CollectedHeap::attempt_allocation_humongous() "
  1130               "retries %d times", try_count);
  1131     }
  1132   }
  1134   ShouldNotReachHere();
  1135   return NULL;
  1136 }
  1138 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
  1139                                        bool expect_null_mutator_alloc_region) {
  1140   assert_at_safepoint(true /* should_be_vm_thread */);
  1141   assert(_mutator_alloc_region.get() == NULL ||
  1142                                              !expect_null_mutator_alloc_region,
  1143          "the current alloc region was unexpectedly found to be non-NULL");
  1145   if (!isHumongous(word_size)) {
  1146     return _mutator_alloc_region.attempt_allocation_locked(word_size,
  1147                                                       false /* bot_updates */);
  1148   } else {
  1149     HeapWord* result = humongous_obj_allocate(word_size);
  1150     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
  1151       g1_policy()->set_initiate_conc_mark_if_possible();
  1152     }
  1153     return result;
  1154   }
  1156   ShouldNotReachHere();
  1157 }
  1159 class PostMCRemSetClearClosure: public HeapRegionClosure {
  1160   G1CollectedHeap* _g1h;
  1161   ModRefBarrierSet* _mr_bs;
  1162 public:
  1163   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
  1164     _g1h(g1h), _mr_bs(mr_bs) {}
  1166   bool doHeapRegion(HeapRegion* r) {
  1167     HeapRegionRemSet* hrrs = r->rem_set();
  1169     if (r->continuesHumongous()) {
  1170       // We'll assert that the strong code root list and RSet is empty
  1171       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
  1172       assert(hrrs->occupied() == 0, "RSet should be empty");
  1173       return false;
  1174     }
  1176     _g1h->reset_gc_time_stamps(r);
  1177     hrrs->clear();
  1178     // You might think here that we could clear just the cards
  1179     // corresponding to the used region.  But no: if we leave a dirty card
  1180     // in a region we might allocate into, then it would prevent that card
  1181     // from being enqueued, and cause it to be missed.
  1182     // Re: the performance cost: we shouldn't be doing full GC anyway!
  1183     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
  1185     return false;
  1186   }
  1187 };
  1189 void G1CollectedHeap::clear_rsets_post_compaction() {
  1190   PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
  1191   heap_region_iterate(&rs_clear);
  1192 }
  1194 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  1195   G1CollectedHeap*   _g1h;
  1196   UpdateRSOopClosure _cl;
  1197   int                _worker_i;
  1198 public:
  1199   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
  1200     _cl(g1->g1_rem_set(), worker_i),
  1201     _worker_i(worker_i),
  1202     _g1h(g1)
  1203   { }
  1205   bool doHeapRegion(HeapRegion* r) {
  1206     if (!r->continuesHumongous()) {
  1207       _cl.set_from(r);
  1208       r->oop_iterate(&_cl);
  1209     }
  1210     return false;
  1211   }
  1212 };
  1214 class ParRebuildRSTask: public AbstractGangTask {
  1215   G1CollectedHeap* _g1;
  1216 public:
  1217   ParRebuildRSTask(G1CollectedHeap* g1)
  1218     : AbstractGangTask("ParRebuildRSTask"),
  1219       _g1(g1)
  1220   { }
  1222   void work(uint worker_id) {
  1223     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
  1224     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
  1225                                           _g1->workers()->active_workers(),
  1226                                          HeapRegion::RebuildRSClaimValue);
  1227   }
  1228 };
  1230 class PostCompactionPrinterClosure: public HeapRegionClosure {
  1231 private:
  1232   G1HRPrinter* _hr_printer;
  1233 public:
  1234   bool doHeapRegion(HeapRegion* hr) {
  1235     assert(!hr->is_young(), "not expecting to find young regions");
  1236     // We only generate output for non-empty regions.
  1237     if (!hr->is_empty()) {
  1238       if (!hr->isHumongous()) {
  1239         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
  1240       } else if (hr->startsHumongous()) {
  1241         if (hr->region_num() == 1) {
  1242           // single humongous region
  1243           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
  1244         } else {
  1245           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
  1246         }
  1247       } else {
  1248         assert(hr->continuesHumongous(), "only way to get here");
  1249         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
  1250       }
  1251     }
  1252     return false;
  1253   }
  1255   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
  1256     : _hr_printer(hr_printer) { }
  1257 };
  1259 void G1CollectedHeap::print_hrs_post_compaction() {
  1260   PostCompactionPrinterClosure cl(hr_printer());
  1261   heap_region_iterate(&cl);
  1262 }
  1264 bool G1CollectedHeap::do_collection(bool explicit_gc,
  1265                                     bool clear_all_soft_refs,
  1266                                     size_t word_size) {
  1267   assert_at_safepoint(true /* should_be_vm_thread */);
  1269   if (GC_locker::check_active_before_gc()) {
  1270     return false;
  1271   }
  1273   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
  1274   gc_timer->register_gc_start();
  1276   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
  1277   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
  1279   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1280   ResourceMark rm;
  1282   print_heap_before_gc();
  1283   trace_heap_before_gc(gc_tracer);
  1285   size_t metadata_prev_used = MetaspaceAux::used_bytes();
  1287   verify_region_sets_optional();
  1289   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1290                            collector_policy()->should_clear_all_soft_refs();
  1292   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
  1294   {
  1295     IsGCActiveMark x;
  1297     // Timing
  1298     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
  1299     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  1300     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  1302     {
  1303       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
  1304       TraceCollectorStats tcs(g1mm()->full_collection_counters());
  1305       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  1307       double start = os::elapsedTime();
  1308       g1_policy()->record_full_collection_start();
  1310       // Note: When we have a more flexible GC logging framework that
  1311       // allows us to add optional attributes to a GC log record we
  1312       // could consider timing and reporting how long we wait in the
  1313       // following two methods.
  1314       wait_while_free_regions_coming();
  1315       // If we start the compaction before the CM threads finish
  1316       // scanning the root regions we might trip them over as we'll
  1317       // be moving objects / updating references. So let's wait until
  1318       // they are done. By telling them to abort, they should complete
  1319       // early.
  1320       _cm->root_regions()->abort();
  1321       _cm->root_regions()->wait_until_scan_finished();
  1322       append_secondary_free_list_if_not_empty_with_lock();
  1324       gc_prologue(true);
  1325       increment_total_collections(true /* full gc */);
  1326       increment_old_marking_cycles_started();
  1328       assert(used() == recalculate_used(), "Should be equal");
  1330       verify_before_gc();
  1332       pre_full_gc_dump(gc_timer);
  1334       COMPILER2_PRESENT(DerivedPointerTable::clear());
  1336       // Disable discovery and empty the discovered lists
  1337       // for the CM ref processor.
  1338       ref_processor_cm()->disable_discovery();
  1339       ref_processor_cm()->abandon_partial_discovery();
  1340       ref_processor_cm()->verify_no_references_recorded();
  1342       // Abandon current iterations of concurrent marking and concurrent
  1343       // refinement, if any are in progress. We have to do this before
  1344       // wait_until_scan_finished() below.
  1345       concurrent_mark()->abort();
  1347       // Make sure we'll choose a new allocation region afterwards.
  1348       release_mutator_alloc_region();
  1349       abandon_gc_alloc_regions();
  1350       g1_rem_set()->cleanupHRRS();
  1352       // We should call this after we retire any currently active alloc
  1353       // regions so that all the ALLOC / RETIRE events are generated
  1354       // before the start GC event.
  1355       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
  1357       // We may have added regions to the current incremental collection
  1358       // set between the last GC or pause and now. We need to clear the
  1359       // incremental collection set and then start rebuilding it afresh
  1360       // after this full GC.
  1361       abandon_collection_set(g1_policy()->inc_cset_head());
  1362       g1_policy()->clear_incremental_cset();
  1363       g1_policy()->stop_incremental_cset_building();
  1365       tear_down_region_sets(false /* free_list_only */);
  1366       g1_policy()->set_gcs_are_young(true);
  1368       // See the comments in g1CollectedHeap.hpp and
  1369       // G1CollectedHeap::ref_processing_init() about
  1370       // how reference processing currently works in G1.
  1372       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
  1373       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
  1375       // Temporarily clear the STW ref processor's _is_alive_non_header field.
  1376       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
  1378       ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  1379       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
  1381       // Do collection work
  1383         HandleMark hm;  // Discard invalid handles created during gc
  1384         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
  1387       assert(free_regions() == 0, "we should not have added any free regions");
  1388       rebuild_region_sets(false /* free_list_only */);
  1390       // Enqueue any discovered reference objects that have
  1391       // not been removed from the discovered lists.
  1392       ref_processor_stw()->enqueue_discovered_references();
  1394       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1396       MemoryService::track_memory_usage();
  1398       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  1399       ref_processor_stw()->verify_no_references_recorded();
  1401       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  1402       ClassLoaderDataGraph::purge();
  1403       MetaspaceAux::verify_metrics();
  1405       // Note: since we've just done a full GC, concurrent
  1406       // marking is no longer active. Therefore we need not
  1407       // re-enable reference discovery for the CM ref processor.
  1408       // That will be done at the start of the next marking cycle.
  1409       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  1410       ref_processor_cm()->verify_no_references_recorded();
  1412       reset_gc_time_stamp();
  1413       // Since everything potentially moved, we will clear all remembered
  1414       // sets, and clear all cards.  Later we will rebuild remembered
  1415       // sets. We will also reset the GC time stamps of the regions.
  1416       clear_rsets_post_compaction();
  1417       check_gc_time_stamps();
  1419       // Resize the heap if necessary.
  1420       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1422       if (_hr_printer.is_active()) {
  1423         // We should do this after we potentially resize the heap so
  1424         // that all the COMMIT / UNCOMMIT events are generated before
  1425         // the end GC event.
  1427         print_hrs_post_compaction();
  1428         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  1431       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  1432       if (hot_card_cache->use_cache()) {
  1433         hot_card_cache->reset_card_counts();
  1434         hot_card_cache->reset_hot_cache();
  1437       // Rebuild remembered sets of all regions.
  1438       if (G1CollectedHeap::use_parallel_gc_threads()) {
  1439         uint n_workers =
  1440           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  1441                                                   workers()->active_workers(),
  1442                                                   Threads::number_of_non_daemon_threads());
  1443         assert(UseDynamicNumberOfGCThreads ||
  1444                n_workers == workers()->total_workers(),
  1445                "If not dynamic should be using all the  workers");
  1446         workers()->set_active_workers(n_workers);
  1447         // Set parallel threads in the heap (_n_par_threads) only
  1448         // before a parallel phase and always reset it to 0 after
  1449         // the phase so that the number of parallel threads does
  1450       // not get carried forward to a serial phase where there
  1451         // may be code that is "possibly_parallel".
  1452         set_par_threads(n_workers);
  1454         ParRebuildRSTask rebuild_rs_task(this);
  1455         assert(check_heap_region_claim_values(
  1456                HeapRegion::InitialClaimValue), "sanity check");
  1457         assert(UseDynamicNumberOfGCThreads ||
  1458                workers()->active_workers() == workers()->total_workers(),
  1459                "Unless dynamic should use total workers");
  1460         // Use the most recent number of active workers
  1461         assert(workers()->active_workers() > 0,
  1462                "Active workers not properly set");
  1463         set_par_threads(workers()->active_workers());
  1464         workers()->run_task(&rebuild_rs_task);
  1465         set_par_threads(0);
  1466         assert(check_heap_region_claim_values(
  1467                HeapRegion::RebuildRSClaimValue), "sanity check");
  1468         reset_heap_region_claim_values();
  1469       } else {
  1470         RebuildRSOutOfRegionClosure rebuild_rs(this);
  1471         heap_region_iterate(&rebuild_rs);
  1474       // Rebuild the strong code root lists for each region
  1475       rebuild_strong_code_roots();
  1477       if (true) { // FIXME
  1478         MetaspaceGC::compute_new_size();
  1481 #ifdef TRACESPINNING
  1482       ParallelTaskTerminator::print_termination_counts();
  1483 #endif
  1485       // Discard all rset updates
  1486       JavaThread::dirty_card_queue_set().abandon_logs();
  1487       assert(!G1DeferredRSUpdate
  1488              || (G1DeferredRSUpdate &&
  1489                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1491       _young_list->reset_sampled_info();
  1492       // At this point there should be no regions in the
  1493       // entire heap tagged as young.
  1494       assert(check_young_list_empty(true /* check_heap */),
  1495              "young list should be empty at this point");
  1497       // Update the number of full collections that have been completed.
  1498       increment_old_marking_cycles_completed(false /* concurrent */);
  1500       _hrs.verify_optional();
  1501       verify_region_sets_optional();
  1503       verify_after_gc();
  1505       // Start a new incremental collection set for the next pause
  1506       assert(g1_policy()->collection_set() == NULL, "must be");
  1507       g1_policy()->start_incremental_cset_building();
  1509       clear_cset_fast_test();
  1511       init_mutator_alloc_region();
  1513       double end = os::elapsedTime();
  1514       g1_policy()->record_full_collection_end();
  1516       if (G1Log::fine()) {
  1517         g1_policy()->print_heap_transition();
  1520       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  1521       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  1522       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  1523       // before any GC notifications are raised.
  1524       g1mm()->update_sizes();
  1526       gc_epilogue(true);
  1529     if (G1Log::finer()) {
  1530       g1_policy()->print_detailed_heap_transition(true /* full */);
  1533     print_heap_after_gc();
  1534     trace_heap_after_gc(gc_tracer);
  1536     post_full_gc_dump(gc_timer);
  1538     gc_timer->register_gc_end();
  1539     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  1542   return true;
  1545 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1546   // do_collection() will return whether it succeeded in performing
  1547   // the GC. Currently, there is no facility on the
  1548   // do_full_collection() API to notify the caller that the collection
  1549   // did not succeed (e.g., because it was locked out by the GC
  1550   // locker). So, right now, we'll ignore the return value.
  1551   bool dummy = do_collection(true,                /* explicit_gc */
  1552                              clear_all_soft_refs,
  1553                              0                    /* word_size */);
  1556 // This code is mostly copied from TenuredGeneration.
  1557 void
  1558 G1CollectedHeap::
  1559 resize_if_necessary_after_full_collection(size_t word_size) {
  1560   // Include the current allocation, if any, and bytes that will be
  1561   // pre-allocated to support collections, as "used".
  1562   const size_t used_after_gc = used();
  1563   const size_t capacity_after_gc = capacity();
  1564   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1566   // This is enforced in arguments.cpp.
  1567   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1568          "otherwise the code below doesn't make sense");
  1570   // We don't have floating point command-line arguments
  1571   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1572   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1573   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1574   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1576   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1577   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1579   // We have to be careful here as these two calculations can overflow
  1580   // 32-bit size_t's.
  1581   double used_after_gc_d = (double) used_after_gc;
  1582   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1583   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
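         // Illustrative example (the numbers are assumptions, not taken from
         // this code): with MinHeapFreeRatio = 40, maximum_used_percentage is
         // 0.60, so 600 MB used after the Full GC gives
         // minimum_desired_capacity_d ~= 600 MB / 0.60 = 1000 MB; a committed
         // capacity below that triggers the expansion branch further down.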
  1585   // Let's make sure that they are both under the max heap size, which
  1586   // by default will make them fit into a size_t.
  1587   double desired_capacity_upper_bound = (double) max_heap_size;
  1588   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1589                                     desired_capacity_upper_bound);
  1590   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1591                                     desired_capacity_upper_bound);
  1593   // We can now safely turn them into size_t's.
  1594   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1595   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
  1597   // This assert only makes sense here, before we adjust them
  1598   // with respect to the min and max heap size.
  1599   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1600          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1601                  "maximum_desired_capacity = "SIZE_FORMAT,
  1602                  minimum_desired_capacity, maximum_desired_capacity));
  1604   // Should not be greater than the heap max size. No need to adjust
  1605   // it with respect to the heap min size as it's a lower bound (i.e.,
  1606   // we'll try to make the capacity larger than it, not smaller).
  1607   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1608   // Should not be less than the heap min size. No need to adjust it
  1609   // with respect to the heap max size as it's an upper bound (i.e.,
  1610   // we'll try to make the capacity smaller than it, not greater).
  1611   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
  1613   if (capacity_after_gc < minimum_desired_capacity) {
  1614     // Don't expand unless it's significant
  1615     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1616     ergo_verbose4(ErgoHeapSizing,
  1617                   "attempt heap expansion",
  1618                   ergo_format_reason("capacity lower than "
  1619                                      "min desired capacity after Full GC")
  1620                   ergo_format_byte("capacity")
  1621                   ergo_format_byte("occupancy")
  1622                   ergo_format_byte_perc("min desired capacity"),
  1623                   capacity_after_gc, used_after_gc,
  1624                   minimum_desired_capacity, (double) MinHeapFreeRatio);
  1625     expand(expand_bytes);
  1627     // No expansion, now see if we want to shrink
  1628   } else if (capacity_after_gc > maximum_desired_capacity) {
  1629     // Capacity too large, compute shrinking size
  1630     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1631     ergo_verbose4(ErgoHeapSizing,
  1632                   "attempt heap shrinking",
  1633                   ergo_format_reason("capacity higher than "
  1634                                      "max desired capacity after Full GC")
  1635                   ergo_format_byte("capacity")
  1636                   ergo_format_byte("occupancy")
  1637                   ergo_format_byte_perc("max desired capacity"),
  1638                   capacity_after_gc, used_after_gc,
  1639                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
  1640     shrink(shrink_bytes);
  1645 HeapWord*
  1646 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1647                                            bool* succeeded) {
  1648   assert_at_safepoint(true /* should_be_vm_thread */);
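         // Overall strategy of this method, as implemented below: (1) retry
         // the allocation as-is, (2) expand the heap and retry, (3) do a
         // Full GC and retry, (4) do a Full GC that also clears soft
         // references and retry, and only then return NULL (with *succeeded
         // still true, since the GCs themselves did run).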
  1650   *succeeded = true;
  1651   // Let's attempt the allocation first.
  1652   HeapWord* result =
  1653     attempt_allocation_at_safepoint(word_size,
  1654                                  false /* expect_null_mutator_alloc_region */);
  1655   if (result != NULL) {
  1656     assert(*succeeded, "sanity");
  1657     return result;
  1660   // In a G1 heap, we're supposed to keep allocation from failing by
  1661   // incremental pauses.  Therefore, at least for now, we'll favor
  1662   // expansion over collection.  (This might change in the future if we can
  1663   // do something smarter than full collection to satisfy a failed alloc.)
  1664   result = expand_and_allocate(word_size);
  1665   if (result != NULL) {
  1666     assert(*succeeded, "sanity");
  1667     return result;
  1670   // Expansion didn't work, we'll try to do a Full GC.
  1671   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1672                                     false, /* clear_all_soft_refs */
  1673                                     word_size);
  1674   if (!gc_succeeded) {
  1675     *succeeded = false;
  1676     return NULL;
  1679   // Retry the allocation
  1680   result = attempt_allocation_at_safepoint(word_size,
  1681                                   true /* expect_null_mutator_alloc_region */);
  1682   if (result != NULL) {
  1683     assert(*succeeded, "sanity");
  1684     return result;
  1687   // Then, try a Full GC that will collect all soft references.
  1688   gc_succeeded = do_collection(false, /* explicit_gc */
  1689                                true,  /* clear_all_soft_refs */
  1690                                word_size);
  1691   if (!gc_succeeded) {
  1692     *succeeded = false;
  1693     return NULL;
  1696   // Retry the allocation once more
  1697   result = attempt_allocation_at_safepoint(word_size,
  1698                                   true /* expect_null_mutator_alloc_region */);
  1699   if (result != NULL) {
  1700     assert(*succeeded, "sanity");
  1701     return result;
  1704   assert(!collector_policy()->should_clear_all_soft_refs(),
  1705          "Flag should have been handled and cleared prior to this point");
  1707   // What else?  We might try synchronous finalization later.  If the total
  1708   // space available is large enough for the allocation, then a more
  1709   // complete compaction phase than we've tried so far might be
  1710   // appropriate.
  1711   assert(*succeeded, "sanity");
  1712   return NULL;
  1715 // Attempt to expand the heap sufficiently
  1716 // to support an allocation of the given "word_size". If
  1717 // successful, perform the allocation and return the address of the
  1718 // allocated block; otherwise return "NULL".
  1720 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1721   assert_at_safepoint(true /* should_be_vm_thread */);
  1723   verify_region_sets_optional();
  1725   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
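         // The request is padded up to at least MinHeapDeltaBytes so that a
         // small failed allocation still expands the heap by a worthwhile
         // amount; expand() below additionally rounds the amount up to whole
         // regions (HeapRegion::GrainBytes).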
  1726   ergo_verbose1(ErgoHeapSizing,
  1727                 "attempt heap expansion",
  1728                 ergo_format_reason("allocation request failed")
  1729                 ergo_format_byte("allocation request"),
  1730                 word_size * HeapWordSize);
  1731   if (expand(expand_bytes)) {
  1732     _hrs.verify_optional();
  1733     verify_region_sets_optional();
  1734     return attempt_allocation_at_safepoint(word_size,
  1735                                  false /* expect_null_mutator_alloc_region */);
  1737   return NULL;
  1740 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
  1741                                              HeapWord* new_end) {
  1742   assert(old_end != new_end, "don't call this otherwise");
  1743   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
  1745   // Update the committed mem region.
  1746   _g1_committed.set_end(new_end);
  1747   // Tell the card table about the update.
  1748   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1749   // Tell the BOT about the update.
  1750   _bot_shared->resize(_g1_committed.word_size());
  1751   // Tell the hot card cache about the update
  1752   _cg1r->hot_card_cache()->resize_card_counts(capacity());
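         // Both expand() and shrink_helper() below funnel committed-range
         // changes through this helper, so the card table, the block offset
         // table (BOT) and the hot card cache always cover exactly the
         // committed part of the heap.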
  1755 bool G1CollectedHeap::expand(size_t expand_bytes) {
  1756   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  1757   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1758                                        HeapRegion::GrainBytes);
  1759   ergo_verbose2(ErgoHeapSizing,
  1760                 "expand the heap",
  1761                 ergo_format_byte("requested expansion amount")
  1762                 ergo_format_byte("attempted expansion amount"),
  1763                 expand_bytes, aligned_expand_bytes);
  1765   if (_g1_storage.uncommitted_size() == 0) {
  1766     ergo_verbose0(ErgoHeapSizing,
  1767                       "did not expand the heap",
  1768                       ergo_format_reason("heap already fully expanded"));
  1769     return false;
  1772   // First commit the memory.
  1773   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1774   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  1775   if (successful) {
  1776     // Then propagate this update to the necessary data structures.
  1777     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1778     update_committed_space(old_end, new_end);
  1780     FreeRegionList expansion_list("Local Expansion List");
  1781     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
  1782     assert(mr.start() == old_end, "post-condition");
  1783     // mr might be a smaller region than what was requested if
  1784     // expand_by() was unable to allocate the HeapRegion instances
  1785     assert(mr.end() <= new_end, "post-condition");
  1787     size_t actual_expand_bytes = mr.byte_size();
  1788     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
  1789     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
  1790            "post-condition");
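           // Illustrative example of the partial-expansion case handled
           // below (the sizes are assumptions): if eight regions' worth of
           // memory was committed but _hrs could only create five new
           // HeapRegion instances, the remaining three regions' worth is
           // uncommitted again so that the committed space and the region
           // sequence stay in sync.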
  1791     if (actual_expand_bytes < aligned_expand_bytes) {
  1792       // We could not expand _hrs to the desired size. In this case we
  1793       // need to shrink the committed space accordingly.
  1794       assert(mr.end() < new_end, "invariant");
  1796       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
  1797       // First uncommit the memory.
  1798       _g1_storage.shrink_by(diff_bytes);
  1799       // Then propagate this update to the necessary data structures.
  1800       update_committed_space(new_end, mr.end());
  1802     _free_list.add_as_tail(&expansion_list);
  1804     if (_hr_printer.is_active()) {
  1805       HeapWord* curr = mr.start();
  1806       while (curr < mr.end()) {
  1807         HeapWord* curr_end = curr + HeapRegion::GrainWords;
  1808         _hr_printer.commit(curr, curr_end);
  1809         curr = curr_end;
  1811       assert(curr == mr.end(), "post-condition");
  1813     g1_policy()->record_new_heap_size(n_regions());
  1814   } else {
  1815     ergo_verbose0(ErgoHeapSizing,
  1816                   "did not expand the heap",
  1817                   ergo_format_reason("heap expansion operation failed"));
  1818     // The expansion of the virtual storage space was unsuccessful.
  1819     // Let's see if it was because we ran out of swap.
  1820     if (G1ExitOnExpansionFailure &&
  1821         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
  1822       // We had head room...
  1823       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
  1826   return successful;
  1829 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  1830   size_t aligned_shrink_bytes =
  1831     ReservedSpace::page_align_size_down(shrink_bytes);
  1832   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1833                                          HeapRegion::GrainBytes);
  1834   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  1836   uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
  1837   HeapWord* old_end = (HeapWord*) _g1_storage.high();
  1838   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
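         // Illustrative example (the region size is an assumption): with
         // 1 MB regions, a 10 MB shrink request asks _hrs to remove up to
         // ten regions; if fewer can actually be removed, num_regions_removed
         // and shrunk_bytes reflect the smaller amount and only that much is
         // uncommitted.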
  1840   ergo_verbose3(ErgoHeapSizing,
  1841                 "shrink the heap",
  1842                 ergo_format_byte("requested shrinking amount")
  1843                 ergo_format_byte("aligned shrinking amount")
  1844                 ergo_format_byte("attempted shrinking amount"),
  1845                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  1846   if (num_regions_removed > 0) {
  1847     _g1_storage.shrink_by(shrunk_bytes);
  1848     HeapWord* new_end = (HeapWord*) _g1_storage.high();
  1850     if (_hr_printer.is_active()) {
  1851       HeapWord* curr = old_end;
  1852       while (curr > new_end) {
  1853         HeapWord* curr_end = curr;
  1854         curr -= HeapRegion::GrainWords;
  1855         _hr_printer.uncommit(curr, curr_end);
  1859     _expansion_regions += num_regions_removed;
  1860     update_committed_space(old_end, new_end);
  1861     HeapRegionRemSet::shrink_heap(n_regions());
  1862     g1_policy()->record_new_heap_size(n_regions());
  1863   } else {
  1864     ergo_verbose0(ErgoHeapSizing,
  1865                   "did not shrink the heap",
  1866                   ergo_format_reason("heap shrinking operation failed"));
  1870 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1871   verify_region_sets_optional();
  1873   // We should only reach here at the end of a Full GC which means we
  1874   // should not be holding on to any GC alloc regions. The method
  1875   // below will make sure of that and do any remaining clean up.
  1876   abandon_gc_alloc_regions();
  1878   // Instead of tearing down / rebuilding the free lists here, we
  1879   // could instead use the remove_all_pending() method on free_list to
  1880   // remove only the ones that we need to remove.
  1881   tear_down_region_sets(true /* free_list_only */);
  1882   shrink_helper(shrink_bytes);
  1883   rebuild_region_sets(true /* free_list_only */);
  1885   _hrs.verify_optional();
  1886   verify_region_sets_optional();
  1889 // Public methods.
  1891 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1892 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1893 #endif // _MSC_VER
  1896 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1897   SharedHeap(policy_),
  1898   _g1_policy(policy_),
  1899   _dirty_card_queue_set(false),
  1900   _into_cset_dirty_card_queue_set(false),
  1901   _is_alive_closure_cm(this),
  1902   _is_alive_closure_stw(this),
  1903   _ref_processor_cm(NULL),
  1904   _ref_processor_stw(NULL),
  1905   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1906   _bot_shared(NULL),
  1907   _evac_failure_scan_stack(NULL),
  1908   _mark_in_progress(false),
  1909   _cg1r(NULL), _summary_bytes_used(0),
  1910   _g1mm(NULL),
  1911   _refine_cte_cl(NULL),
  1912   _full_collection(false),
  1913   _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
  1914   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  1915   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  1916   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  1917   _free_regions_coming(false),
  1918   _young_list(new YoungList(this)),
  1919   _gc_time_stamp(0),
  1920   _retained_old_gc_alloc_region(NULL),
  1921   _survivor_plab_stats(YoungPLABSize, PLABWeight),
  1922   _old_plab_stats(OldPLABSize, PLABWeight),
  1923   _expand_heap_after_alloc_failure(true),
  1924   _surviving_young_words(NULL),
  1925   _old_marking_cycles_started(0),
  1926   _old_marking_cycles_completed(0),
  1927   _concurrent_cycle_started(false),
  1928   _in_cset_fast_test(),
  1929   _dirty_cards_region_list(NULL),
  1930   _worker_cset_start_region(NULL),
  1931   _worker_cset_start_region_time_stamp(NULL),
  1932   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  1933   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  1934   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  1935   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
  1937   _g1h = this;
  1938   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1939     vm_exit_during_initialization("Failed necessary allocation.");
  1942   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
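         // That is, any allocation of at least half a region is treated as
         // humongous; e.g. with 1 MB regions (an assumed, illustrative size)
         // requests of 512 KB and larger take the humongous allocation path.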
  1944   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1945   _task_queues = new RefToScanQueueSet(n_queues);
  1947   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1948   assert(n_rem_sets > 0, "Invariant.");
  1950   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  1951   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
  1952   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
  1954   for (int i = 0; i < n_queues; i++) {
  1955     RefToScanQueue* q = new RefToScanQueue();
  1956     q->initialize();
  1957     _task_queues->register_queue(i, q);
  1958     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
  1960   clear_cset_start_regions();
  1962   // Initialize the G1EvacuationFailureALot counters and flags.
  1963   NOT_PRODUCT(reset_evacuation_should_fail();)
  1965   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1968 jint G1CollectedHeap::initialize() {
  1969   CollectedHeap::pre_initialize();
  1970   os::enable_vtime();
  1972   G1Log::init();
  1974   // Necessary to satisfy locking discipline assertions.
  1976   MutexLocker x(Heap_lock);
  1978   // We have to initialize the printer before committing the heap, as
  1979   // it will be used then.
  1980   _hr_printer.set_active(G1PrintHeapRegions);
  1982   // While there are no constraints in the GC code that HeapWordSize
  1983   // be any particular value, there are multiple other areas in the
  1984   // system which believe this to be true (e.g. oop->object_size in some
  1985   // cases incorrectly returns the size in wordSize units rather than
  1986   // HeapWordSize).
  1987   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1989   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1990   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1991   size_t heap_alignment = collector_policy()->heap_alignment();
  1993   // Ensure that the sizes are properly aligned.
  1994   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1995   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1996   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
  1998   _refine_cte_cl = new RefineCardTableEntryClosure();
  2000   _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
  2002   // Reserve the maximum.
  2004   // When compressed oops are enabled, the preferred heap base
  2005   // is calculated by subtracting the requested size from the
  2006   // 32Gb boundary and using the result as the base address for
  2007   // heap reservation. If the requested size is not aligned to
  2008   // HeapRegion::GrainBytes (i.e. the alignment that is passed
  2009   // into the ReservedHeapSpace constructor) then the actual
  2010   // base of the reserved heap may end up differing from the
  2011   // address that was requested (i.e. the preferred heap base).
  2012   // If this happens then we could end up using a non-optimal
  2013   // compressed oops mode.
  2015   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
  2016                                                  heap_alignment);
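         // Illustrative example of the above (the heap size is an
         // assumption): for a 20 GB maximum heap the preferred base would be
         // 32 GB - 20 GB = 12 GB, keeping the whole heap under the 32 GB
         // boundary that compressed oops can address with the default object
         // alignment; if the reservation cannot be satisfied there, a less
         // optimal compressed oops mode may end up being used.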
  2018   // It is important to do this in a way such that concurrent readers can't
  2019   // temporarily think something is in the heap.  (I've actually seen this
  2020   // happen in asserts: DLD.)
  2021   _reserved.set_word_size(0);
  2022   _reserved.set_start((HeapWord*)heap_rs.base());
  2023   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  2025   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
  2027   // Create the gen rem set (and barrier set) for the entire reserved region.
  2028   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  2029   set_barrier_set(rem_set()->bs());
  2030   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
  2031     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
  2032     return JNI_ENOMEM;
  2035   // Also create a G1 rem set.
  2036   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
  2038   // Carve out the G1 part of the heap.
  2040   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  2041   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  2042                            g1_rs.size()/HeapWordSize);
  2044   _g1_storage.initialize(g1_rs, 0);
  2045   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  2046   _hrs.initialize((HeapWord*) _g1_reserved.start(),
  2047                   (HeapWord*) _g1_reserved.end());
  2048   assert(_hrs.max_length() == _expansion_regions,
  2049          err_msg("max length: %u expansion regions: %u",
  2050                  _hrs.max_length(), _expansion_regions));
  2052   // Do later initialization work for concurrent refinement.
  2053   _cg1r->init();
  2055   // 6843694 - ensure that the maximum region index can fit
  2056   // in the remembered set structures.
  2057   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  2058   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  2060   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  2061   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  2062   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
  2063             "too many cards per region");
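         // Assuming RegionIdx_t and CardIdx_t are 16-bit types (an
         // assumption about typedefs defined elsewhere), both limits work
         // out to (1 << 15) - 1 = 32767 regions and 32767 cards per region.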
  2065   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
  2067   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  2068                                              heap_word_size(init_byte_size));
  2070   _g1h = this;
  2072   _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
  2074   // Create the ConcurrentMark data structure and thread.
  2075   // (Must do this late, so that "max_regions" is defined.)
  2076   _cm = new ConcurrentMark(this, heap_rs);
  2077   if (_cm == NULL || !_cm->completed_initialization()) {
  2078     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
  2079     return JNI_ENOMEM;
  2081   _cmThread = _cm->cmThread();
  2083   // Initialize the from_card cache structure of HeapRegionRemSet.
  2084   HeapRegionRemSet::init_heap(max_regions());
  2086   // Now expand into the initial heap size.
  2087   if (!expand(init_byte_size)) {
  2088     vm_shutdown_during_initialization("Failed to allocate initial heap.");
  2089     return JNI_ENOMEM;
  2092   // Perform any initialization actions delegated to the policy.
  2093   g1_policy()->init();
  2095   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  2096                                                SATB_Q_FL_lock,
  2097                                                G1SATBProcessCompletedThreshold,
  2098                                                Shared_SATB_Q_lock);
  2100   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
  2101                                                 DirtyCardQ_CBL_mon,
  2102                                                 DirtyCardQ_FL_lock,
  2103                                                 concurrent_g1_refine()->yellow_zone(),
  2104                                                 concurrent_g1_refine()->red_zone(),
  2105                                                 Shared_DirtyCardQ_lock);
  2107   if (G1DeferredRSUpdate) {
  2108     dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
  2109                                       DirtyCardQ_CBL_mon,
  2110                                       DirtyCardQ_FL_lock,
  2111                                       -1, // never trigger processing
  2112                                       -1, // no limit on length
  2113                                       Shared_DirtyCardQ_lock,
  2114                                       &JavaThread::dirty_card_queue_set());
  2117   // Initialize the card queue set used to hold cards containing
  2118   // references into the collection set.
  2119   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
  2120                                              DirtyCardQ_CBL_mon,
  2121                                              DirtyCardQ_FL_lock,
  2122                                              -1, // never trigger processing
  2123                                              -1, // no limit on length
  2124                                              Shared_DirtyCardQ_lock,
  2125                                              &JavaThread::dirty_card_queue_set());
  2127   // In case we're keeping closure specialization stats, initialize those
  2128   // counts and that mechanism.
  2129   SpecializationStats::clear();
  2131   // Here we allocate the dummy full region that is required by the
  2132   // G1AllocRegion class. If we don't pass an address in the reserved
  2133   // space here, lots of asserts fire.
  2135   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
  2136                                              _g1_reserved.start());
  2137   // We'll re-use the same region whether the alloc region will
  2138   // require BOT updates or not and, if it doesn't, then a non-young
  2139   // region will complain that it cannot support allocations without
  2140   // BOT updates. So we'll tag the dummy region as young to avoid that.
  2141   dummy_region->set_young();
  2142   // Make sure it's full.
  2143   dummy_region->set_top(dummy_region->end());
  2144   G1AllocRegion::setup(this, dummy_region);
  2146   init_mutator_alloc_region();
  2148   // Create the monitoring and management support now so that
  2149   // values in the heap have been properly initialized.
  2150   _g1mm = new G1MonitoringSupport(this);
  2152   G1StringDedup::initialize();
  2154   return JNI_OK;
  2157 void G1CollectedHeap::stop() {
  2158   // Stop all concurrent threads. We do this to make sure these threads
  2159   // do not continue to execute and access resources (e.g. gclog_or_tty)
  2160   // that are destroyed during shutdown.
  2161   _cg1r->stop();
  2162   _cmThread->stop();
  2163   if (G1StringDedup::is_enabled()) {
  2164     G1StringDedup::stop();
  2168 size_t G1CollectedHeap::conservative_max_heap_alignment() {
  2169   return HeapRegion::max_region_size();
  2172 void G1CollectedHeap::ref_processing_init() {
  2173   // Reference processing in G1 currently works as follows:
  2174   //
  2175   // * There are two reference processor instances. One is
  2176   //   used to record and process discovered references
  2177   //   during concurrent marking; the other is used to
  2178   //   record and process references during STW pauses
  2179   //   (both full and incremental).
  2180   // * Both ref processors need to 'span' the entire heap as
  2181   //   the regions in the collection set may be dotted around.
  2182   //
  2183   // * For the concurrent marking ref processor:
  2184   //   * Reference discovery is enabled at initial marking.
  2185   //   * Reference discovery is disabled and the discovered
  2186   //     references processed etc during remarking.
  2187   //   * Reference discovery is MT (see below).
  2188   //   * Reference discovery requires a barrier (see below).
  2189   //   * Reference processing may or may not be MT
  2190   //     (depending on the value of ParallelRefProcEnabled
  2191   //     and ParallelGCThreads).
  2192   //   * A full GC disables reference discovery by the CM
  2193   //     ref processor and abandons any entries on its
  2194   //     discovered lists.
  2195   //
  2196   // * For the STW processor:
  2197   //   * Non MT discovery is enabled at the start of a full GC.
  2198   //   * Processing and enqueueing during a full GC is non-MT.
  2199   //   * During a full GC, references are processed after marking.
  2200   //
  2201   //   * Discovery (may or may not be MT) is enabled at the start
  2202   //     of an incremental evacuation pause.
  2203   //   * References are processed near the end of a STW evacuation pause.
  2204   //   * For both types of GC:
  2205   //     * Discovery is atomic - i.e. not concurrent.
  2206   //     * Reference discovery will not need a barrier.
  2208   SharedHeap::ref_processing_init();
  2209   MemRegion mr = reserved_region();
  2211   // Concurrent Mark ref processor
  2212   _ref_processor_cm =
  2213     new ReferenceProcessor(mr,    // span
  2214                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2215                                 // mt processing
  2216                            (int) ParallelGCThreads,
  2217                                 // degree of mt processing
  2218                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
  2219                                 // mt discovery
  2220                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
  2221                                 // degree of mt discovery
  2222                            false,
  2223                                 // Reference discovery is not atomic
  2224                            &_is_alive_closure_cm);
  2225                                 // is alive closure
  2226                                 // (for efficiency/performance)
  2228   // STW ref processor
  2229   _ref_processor_stw =
  2230     new ReferenceProcessor(mr,    // span
  2231                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
  2232                                 // mt processing
  2233                            MAX2((int)ParallelGCThreads, 1),
  2234                                 // degree of mt processing
  2235                            (ParallelGCThreads > 1),
  2236                                 // mt discovery
  2237                            MAX2((int)ParallelGCThreads, 1),
  2238                                 // degree of mt discovery
  2239                            true,
  2240                                 // Reference discovery is atomic
  2241                            &_is_alive_closure_stw);
  2242                                 // is alive closure
  2243                                 // (for efficiency/performance)
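         // In short, and matching the block comment above: the CM ref
         // processor discovers references concurrently with the mutators
         // (non-atomic discovery, which is why it needs a barrier), while the
         // STW ref processor only discovers references at safepoints (atomic
         // discovery, no barrier needed).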
  2246 size_t G1CollectedHeap::capacity() const {
  2247   return _g1_committed.byte_size();
  2250 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  2251   assert(!hr->continuesHumongous(), "pre-condition");
  2252   hr->reset_gc_time_stamp();
  2253   if (hr->startsHumongous()) {
  2254     uint first_index = hr->hrs_index() + 1;
  2255     uint last_index = hr->last_hc_index();
  2256     for (uint i = first_index; i < last_index; i += 1) {
  2257       HeapRegion* chr = region_at(i);
  2258       assert(chr->continuesHumongous(), "sanity");
  2259       chr->reset_gc_time_stamp();
  2264 #ifndef PRODUCT
  2265 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
  2266 private:
  2267   unsigned _gc_time_stamp;
  2268   bool _failures;
  2270 public:
  2271   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
  2272     _gc_time_stamp(gc_time_stamp), _failures(false) { }
  2274   virtual bool doHeapRegion(HeapRegion* hr) {
  2275     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
  2276     if (_gc_time_stamp != region_gc_time_stamp) {
  2277       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
  2278                              "expected %d", HR_FORMAT_PARAMS(hr),
  2279                              region_gc_time_stamp, _gc_time_stamp);
  2280       _failures = true;
  2282     return false;
  2285   bool failures() { return _failures; }
  2286 };
  2288 void G1CollectedHeap::check_gc_time_stamps() {
  2289   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  2290   heap_region_iterate(&cl);
  2291   guarantee(!cl.failures(), "all GC time stamps should have been reset");
  2293 #endif // PRODUCT
  2295 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2296                                                  DirtyCardQueue* into_cset_dcq,
  2297                                                  bool concurrent,
  2298                                                  uint worker_i) {
  2299   // Clean cards in the hot card cache
  2300   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  2301   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
  2303   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2304   int n_completed_buffers = 0;
  2305   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2306     n_completed_buffers++;
  2308   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  2309   dcqs.clear_n_completed_buffers();
  2310   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2314 // Computes the sum of the storage used by the various regions.
  2316 size_t G1CollectedHeap::used() const {
  2317   assert(Heap_lock->owner() != NULL,
  2318          "Should be owned on this thread's behalf.");
  2319   size_t result = _summary_bytes_used;
  2320   // Read only once in case it is set to NULL concurrently
  2321   HeapRegion* hr = _mutator_alloc_region.get();
  2322   if (hr != NULL)
  2323     result += hr->used();
  2324   return result;
  2327 size_t G1CollectedHeap::used_unlocked() const {
  2328   size_t result = _summary_bytes_used;
  2329   return result;
  2332 class SumUsedClosure: public HeapRegionClosure {
  2333   size_t _used;
  2334 public:
  2335   SumUsedClosure() : _used(0) {}
  2336   bool doHeapRegion(HeapRegion* r) {
  2337     if (!r->continuesHumongous()) {
  2338       _used += r->used();
  2340     return false;
  2342   size_t result() { return _used; }
  2343 };
  2345 size_t G1CollectedHeap::recalculate_used() const {
  2346   double recalculate_used_start = os::elapsedTime();
  2348   SumUsedClosure blk;
  2349   heap_region_iterate(&blk);
  2351   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
  2352   return blk.result();
  2355 size_t G1CollectedHeap::unsafe_max_alloc() {
  2356   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2357   // otherwise, is there space in the current allocation region?
  2359   // We need to store the current allocation region in a local variable
  2360   // here. The problem is that this method doesn't take any locks and
  2361   // there may be other threads which overwrite the current allocation
  2362   // region field. attempt_allocation(), for example, sets it to NULL
  2363   // and this can happen *after* the NULL check here but before the call
  2364   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2365   // to be a problem in the optimized build, since the two loads of the
  2366   // current allocation region field are optimized away.
  2367   HeapRegion* hr = _mutator_alloc_region.get();
  2368   if (hr == NULL) {
  2369     return 0;
  2371   return hr->free();
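       // The predicate below decides whether a requested "full" collection
       // should instead be satisfied by starting a concurrent cycle: e.g.
       // System.gc() does so only when ExplicitGCInvokesConcurrent is set,
       // while a humongous allocation request always prefers a concurrent
       // cycle first.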
  2374 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  2375   switch (cause) {
  2376     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
  2377     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  2378     case GCCause::_g1_humongous_allocation: return true;
  2379     default:                                return false;
  2383 #ifndef PRODUCT
  2384 void G1CollectedHeap::allocate_dummy_regions() {
  2385   // Let's fill up most of the region
  2386   size_t word_size = HeapRegion::GrainWords - 1024;
  2387   // And as a result the region we'll allocate will be humongous.
  2388   guarantee(isHumongous(word_size), "sanity");
  2390   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
  2391     // Let's use the existing mechanism for the allocation
  2392     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
  2393     if (dummy_obj != NULL) {
  2394       MemRegion mr(dummy_obj, word_size);
  2395       CollectedHeap::fill_with_object(mr);
  2396     } else {
  2397       // If we can't allocate once, we probably cannot allocate
  2398       // again. Let's get out of the loop.
  2399       break;
  2403 #endif // !PRODUCT
  2405 void G1CollectedHeap::increment_old_marking_cycles_started() {
  2406   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
  2407     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
  2408     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
  2409     _old_marking_cycles_started, _old_marking_cycles_completed));
  2411   _old_marking_cycles_started++;
  2414 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
  2415   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2417   // We assume that if concurrent == true, then the caller is a
  2418   // concurrent thread that has joined the Suspendible Thread
  2419   // Set. If there's ever a cheap way to check this, we should add an
  2420   // assert here.
  2422   // Given that this method is called at the end of a Full GC or of a
  2423   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2424   // interrupt a concurrent cycle), the number of full collections
  2425   // completed should be either one (in the case where there was no
  2426   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2427   // behind the number of full collections started.
  2429   // This is the case for the inner caller, i.e. a Full GC.
  2430   assert(concurrent ||
  2431          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
  2432          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
  2433          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
  2434                  "is inconsistent with _old_marking_cycles_completed = %u",
  2435                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2437   // This is the case for the outer caller, i.e. the concurrent cycle.
  2438   assert(!concurrent ||
  2439          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
  2440          err_msg("for outer caller (concurrent cycle): "
  2441                  "_old_marking_cycles_started = %u "
  2442                  "is inconsistent with _old_marking_cycles_completed = %u",
  2443                  _old_marking_cycles_started, _old_marking_cycles_completed));
  2445   _old_marking_cycles_completed += 1;
  2447   // We need to clear the "in_progress" flag in the CM thread before
  2448   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  2449   // is set) so that if a waiter requests another System.gc() it doesn't
  2450   // incorrectly see that a marking cycle is still in progress.
  2451   if (concurrent) {
  2452     _cmThread->clear_in_progress();
  2455   // This notify_all() will ensure that a thread that called
  2456   // System.gc() (with or without ExplicitGCInvokesConcurrent set)
  2457   // and is waiting for a full GC to finish will be woken up. It is
  2458   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2459   FullGCCount_lock->notify_all();
  2462 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
  2463   _concurrent_cycle_started = true;
  2464   _gc_timer_cm->register_gc_start(start_time);
  2466   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
  2467   trace_heap_before_gc(_gc_tracer_cm);
  2470 void G1CollectedHeap::register_concurrent_cycle_end() {
  2471   if (_concurrent_cycle_started) {
  2472     if (_cm->has_aborted()) {
  2473       _gc_tracer_cm->report_concurrent_mode_failure();
  2476     _gc_timer_cm->register_gc_end();
  2477     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
  2479     _concurrent_cycle_started = false;
  2483 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
  2484   if (_concurrent_cycle_started) {
  2485     trace_heap_after_gc(_gc_tracer_cm);
  2489 G1YCType G1CollectedHeap::yc_type() {
  2490   bool is_young = g1_policy()->gcs_are_young();
  2491   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
  2492   bool is_during_mark = mark_in_progress();
  2494   if (is_initial_mark) {
  2495     return InitialMark;
  2496   } else if (is_during_mark) {
  2497     return DuringMark;
  2498   } else if (is_young) {
  2499     return Normal;
  2500   } else {
  2501     return Mixed;
  2505 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2506   assert_heap_not_locked();
  2508   unsigned int gc_count_before;
  2509   unsigned int old_marking_count_before;
  2510   bool retry_gc;
  2512   do {
  2513     retry_gc = false;
  2516       MutexLocker ml(Heap_lock);
  2518       // Read the GC count while holding the Heap_lock
  2519       gc_count_before = total_collections();
  2520       old_marking_count_before = _old_marking_cycles_started;
  2523     if (should_do_concurrent_full_gc(cause)) {
  2524       // Schedule an initial-mark evacuation pause that will start a
  2525       // concurrent cycle. We're setting word_size to 0 which means that
  2526       // we are not requesting a post-GC allocation.
  2527       VM_G1IncCollectionPause op(gc_count_before,
  2528                                  0,     /* word_size */
  2529                                  true,  /* should_initiate_conc_mark */
  2530                                  g1_policy()->max_pause_time_ms(),
  2531                                  cause);
  2533       VMThread::execute(&op);
  2534       if (!op.pause_succeeded()) {
  2535         if (old_marking_count_before == _old_marking_cycles_started) {
  2536           retry_gc = op.should_retry_gc();
  2537         } else {
  2538           // A Full GC happened while we were trying to schedule the
  2539           // initial-mark GC. No point in starting a new cycle given
  2540           // that the whole heap was collected anyway.
  2543         if (retry_gc) {
  2544           if (GC_locker::is_active_and_needs_gc()) {
  2545             GC_locker::stall_until_clear();
  2549     } else {
  2550       if (cause == GCCause::_gc_locker
  2551           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2553         // Schedule a standard evacuation pause. We're setting word_size
  2554         // to 0 which means that we are not requesting a post-GC allocation.
  2555         VM_G1IncCollectionPause op(gc_count_before,
  2556                                    0,     /* word_size */
  2557                                    false, /* should_initiate_conc_mark */
  2558                                    g1_policy()->max_pause_time_ms(),
  2559                                    cause);
  2560         VMThread::execute(&op);
  2561       } else {
  2562         // Schedule a Full GC.
  2563         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
  2564         VMThread::execute(&op);
  2567   } while (retry_gc);
  2570 bool G1CollectedHeap::is_in(const void* p) const {
  2571   if (_g1_committed.contains(p)) {
  2572     // Given that we know that p is in the committed space,
  2573     // heap_region_containing_raw() should successfully
  2574     // return the containing region.
  2575     HeapRegion* hr = heap_region_containing_raw(p);
  2576     return hr->is_in(p);
  2577   } else {
  2578     return false;
  2582 // Iteration functions.
  2584 // Iterates an OopClosure over all ref-containing fields of objects
  2585 // within a HeapRegion.
  2587 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2588   MemRegion _mr;
  2589   ExtendedOopClosure* _cl;
  2590 public:
  2591   IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
  2592     : _mr(mr), _cl(cl) {}
  2593   bool doHeapRegion(HeapRegion* r) {
  2594     if (!r->continuesHumongous()) {
  2595       r->oop_iterate(_cl);
  2597     return false;
  2599 };
  2601 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  2602   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2603   heap_region_iterate(&blk);
  2606 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  2607   IterateOopClosureRegionClosure blk(mr, cl);
  2608   heap_region_iterate(&blk);
  2611 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2613 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2614   ObjectClosure* _cl;
  2615 public:
  2616   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2617   bool doHeapRegion(HeapRegion* r) {
  2618     if (! r->continuesHumongous()) {
  2619       r->object_iterate(_cl);
  2621     return false;
  2623 };
  2625 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  2626   IterateObjectClosureRegionClosure blk(cl);
  2627   heap_region_iterate(&blk);
  2630 // Calls a SpaceClosure on a HeapRegion.
  2632 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2633   SpaceClosure* _cl;
  2634 public:
  2635   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2636   bool doHeapRegion(HeapRegion* r) {
  2637     _cl->do_space(r);
  2638     return false;
  2640 };
  2642 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2643   SpaceClosureRegionClosure blk(cl);
  2644   heap_region_iterate(&blk);
  2647 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  2648   _hrs.iterate(cl);
  2651 void
  2652 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2653                                                  uint worker_id,
  2654                                                  uint no_of_par_workers,
  2655                                                  jint claim_value) {
  2656   const uint regions = n_regions();
  2657   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  2658                              no_of_par_workers :
  2659                              1);
  2660   assert(UseDynamicNumberOfGCThreads ||
  2661          no_of_par_workers == workers()->total_workers(),
  2662          "Non dynamic should use fixed number of workers");
  2663   // try to spread out the starting points of the workers
  2664   const HeapRegion* start_hr =
  2665                         start_region_for_worker(worker_id, no_of_par_workers);
  2666   const uint start_index = start_hr->hrs_index();
  2668   // each worker will actually look at all regions
  2669   for (uint count = 0; count < regions; ++count) {
  2670     const uint index = (start_index + count) % regions;
  2671     assert(0 <= index && index < regions, "sanity");
  2672     HeapRegion* r = region_at(index);
  2673     // we'll ignore "continues humongous" regions (we'll process them
  2674     // when we come across their corresponding "starts humongous"
  2675     // region) and regions already claimed
  2676     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2677       continue;
  2679     // OK, try to claim it
  2680     if (r->claimHeapRegion(claim_value)) {
  2681       // success!
  2682       assert(!r->continuesHumongous(), "sanity");
  2683       if (r->startsHumongous()) {
  2684         // If the region is "starts humongous" we'll iterate over its
  2685         // "continues humongous" first; in fact we'll do them
  2686         // first. The order is important. In on case, calling the
  2687         // closure on the "starts humongous" region might de-allocate
  2688         // and clear all its "continues humongous" regions and, as a
  2689         // result, we might end up processing them twice. So, we'll do
  2690         // them first (notice: most closures will ignore them anyway) and
  2691         // then we'll do the "starts humongous" region.
  2692         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
  2693           HeapRegion* chr = region_at(ch_index);
  2695           // if the region has already been claimed or it's not
  2696           // "continues humongous" we're done
  2697           if (chr->claim_value() == claim_value ||
  2698               !chr->continuesHumongous()) {
  2699             break;
  2702           // No one should have claimed it directly. We can assert
  2703           // this because we claimed its "starts humongous" region.
  2704           assert(chr->claim_value() != claim_value, "sanity");
  2705           assert(chr->humongous_start_region() == r, "sanity");
  2707           if (chr->claimHeapRegion(claim_value)) {
  2708             // we should always be able to claim it; no one else should
  2709             // be trying to claim this region
  2711             bool res2 = cl->doHeapRegion(chr);
  2712             assert(!res2, "Should not abort");
  2714             // Right now, this holds (i.e., no closure that actually
  2715             // does something with "continues humongous" regions
  2716             // clears them). We might have to weaken it in the future,
  2717             // but let's leave these two asserts here for extra safety.
  2718             assert(chr->continuesHumongous(), "should still be the case");
  2719             assert(chr->humongous_start_region() == r, "sanity");
  2720           } else {
  2721             guarantee(false, "we should not reach here");
  2726       assert(!r->continuesHumongous(), "sanity");
  2727       bool res = cl->doHeapRegion(r);
  2728       assert(!res, "Should not abort");
  2733 class ResetClaimValuesClosure: public HeapRegionClosure {
  2734 public:
  2735   bool doHeapRegion(HeapRegion* r) {
  2736     r->set_claim_value(HeapRegion::InitialClaimValue);
  2737     return false;
  2739 };
  2741 void G1CollectedHeap::reset_heap_region_claim_values() {
  2742   ResetClaimValuesClosure blk;
  2743   heap_region_iterate(&blk);
  2746 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
  2747   ResetClaimValuesClosure blk;
  2748   collection_set_iterate(&blk);
  2751 #ifdef ASSERT
  2752 // This checks whether all regions in the heap have the correct claim
  2753 // value. It also piggy-backs a check to ensure that the
  2754 // humongous_start_region() information on "continues humongous"
  2755 // regions is correct.
  2757 class CheckClaimValuesClosure : public HeapRegionClosure {
  2758 private:
  2759   jint _claim_value;
  2760   uint _failures;
  2761   HeapRegion* _sh_region;
  2763 public:
  2764   CheckClaimValuesClosure(jint claim_value) :
  2765     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2766   bool doHeapRegion(HeapRegion* r) {
  2767     if (r->claim_value() != _claim_value) {
  2768       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2769                              "claim value = %d, should be %d",
  2770                              HR_FORMAT_PARAMS(r),
  2771                              r->claim_value(), _claim_value);
  2772       ++_failures;
  2774     if (!r->isHumongous()) {
  2775       _sh_region = NULL;
  2776     } else if (r->startsHumongous()) {
  2777       _sh_region = r;
  2778     } else if (r->continuesHumongous()) {
  2779       if (r->humongous_start_region() != _sh_region) {
  2780         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
  2781                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2782                                HR_FORMAT_PARAMS(r),
  2783                                r->humongous_start_region(),
  2784                                _sh_region);
  2785         ++_failures;
  2788     return false;
  2790   uint failures() { return _failures; }
  2791 };
  2793 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2794   CheckClaimValuesClosure cl(claim_value);
  2795   heap_region_iterate(&cl);
  2796   return cl.failures() == 0;
  2799 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
  2800 private:
  2801   jint _claim_value;
  2802   uint _failures;
  2804 public:
  2805   CheckClaimValuesInCSetHRClosure(jint claim_value) :
  2806     _claim_value(claim_value), _failures(0) { }
  2808   uint failures() { return _failures; }
  2810   bool doHeapRegion(HeapRegion* hr) {
  2811     assert(hr->in_collection_set(), "how?");
  2812     assert(!hr->isHumongous(), "H-region in CSet");
  2813     if (hr->claim_value() != _claim_value) {
  2814       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
  2815                              "claim value = %d, should be %d",
  2816                              HR_FORMAT_PARAMS(hr),
  2817                              hr->claim_value(), _claim_value);
  2818       _failures += 1;
  2820     return false;
  2822 };
  2824 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
  2825   CheckClaimValuesInCSetHRClosure cl(claim_value);
  2826   collection_set_iterate(&cl);
  2827   return cl.failures() == 0;
  2829 #endif // ASSERT
  2831 // Clear the cached CSet starting regions and (more importantly)
  2832 // the time stamps. Called when we reset the GC time stamp.
  2833 void G1CollectedHeap::clear_cset_start_regions() {
  2834   assert(_worker_cset_start_region != NULL, "sanity");
  2835   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
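         // One cached start region and time stamp per worker; keep at least
         // one entry even when running with a single (serial) GC thread.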
  2837   int n_queues = MAX2((int)ParallelGCThreads, 1);
  2838   for (int i = 0; i < n_queues; i++) {
  2839     _worker_cset_start_region[i] = NULL;
  2840     _worker_cset_start_region_time_stamp[i] = 0;
  2844 // Given the id of a worker, obtain or calculate a suitable
  2845 // starting region for iterating over the current collection set.
  2846 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
  2847   assert(get_gc_time_stamp() > 0, "should have been updated by now");
  2849   HeapRegion* result = NULL;
  2850   unsigned gc_time_stamp = get_gc_time_stamp();
  2852   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
  2853     // Cached starting region for current worker was set
  2854     // during the current pause - so it's valid.
  2855     // Note: the cached starting heap region may be NULL
  2856     // (when the collection set is empty).
  2857     result = _worker_cset_start_region[worker_i];
  2858     assert(result == NULL || result->in_collection_set(), "sanity");
  2859     return result;
  2862   // The cached entry was not valid so let's calculate
  2863   // a suitable starting heap region for this worker.
  2865   // We want the parallel threads to start their collection
  2866   // set iteration at different collection set regions to
  2867   // avoid contention.
  2868   // If we have:
  2869   //          n collection set regions
  2870   //          p threads
  2871   // Then thread t will start at region floor ((t * n) / p)
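         //
         // For example (illustrative numbers only), with n = 7 regions and
         // p = 3 threads the starting regions are floor(0*7/3) = 0,
         // floor(1*7/3) = 2 and floor(2*7/3) = 4.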
  2873   result = g1_policy()->collection_set();
  2874   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2875     uint cs_size = g1_policy()->cset_region_length();
  2876     uint active_workers = workers()->active_workers();
  2877     assert(UseDynamicNumberOfGCThreads ||
  2878              active_workers == workers()->total_workers(),
  2879              "Unless dynamic should use total workers");
  2881     uint end_ind   = (cs_size * worker_i) / active_workers;
  2882     uint start_ind = 0;
  2884     if (worker_i > 0 &&
  2885         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
  2886       // The previous worker's starting region is valid,
  2887       // so let's iterate from there.
  2888       start_ind = (cs_size * (worker_i - 1)) / active_workers;
  2889       result = _worker_cset_start_region[worker_i - 1];
  2892     for (uint i = start_ind; i < end_ind; i++) {
  2893       result = result->next_in_collection_set();
  2897   // Note: the calculated starting heap region may be NULL
  2898   // (when the collection set is empty).
  2899   assert(result == NULL || result->in_collection_set(), "sanity");
  2900   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
  2901          "should be updated only once per pause");
  2902   _worker_cset_start_region[worker_i] = result;
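         // The storestore barrier below publishes the cached region before the
         // time stamp that marks it as valid, so a reader that observes the new
         // time stamp also observes the region.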
  2903   OrderAccess::storestore();
  2904   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
  2905   return result;
  2908 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
  2909                                                      uint no_of_par_workers) {
  2910   uint worker_num =
  2911            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
  2912   assert(UseDynamicNumberOfGCThreads ||
  2913          no_of_par_workers == workers()->total_workers(),
  2914          "Non dynamic should use fixed number of workers");
  2915   const uint start_index = n_regions() * worker_i / worker_num;
  2916   return region_at(start_index);
  2919 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2920   HeapRegion* r = g1_policy()->collection_set();
  2921   while (r != NULL) {
  2922     HeapRegion* next = r->next_in_collection_set();
  2923     if (cl->doHeapRegion(r)) {
  2924       cl->incomplete();
  2925       return;
  2927     r = next;
  2931 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2932                                                   HeapRegionClosure *cl) {
  2933   if (r == NULL) {
  2934     // The CSet is empty so there's nothing to do.
  2935     return;
  2938   assert(r->in_collection_set(),
  2939          "Start region must be a member of the collection set.");
  2940   HeapRegion* cur = r;
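         // Note that the "&& false" in the checks below makes the early-exit
         // branch dead code: the entire collection set is always traversed even
         // if the closure asks to abort the iteration.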
  2941   while (cur != NULL) {
  2942     HeapRegion* next = cur->next_in_collection_set();
  2943     if (cl->doHeapRegion(cur) && false) {
  2944       cl->incomplete();
  2945       return;
  2947     cur = next;
  2949   cur = g1_policy()->collection_set();
  2950   while (cur != r) {
  2951     HeapRegion* next = cur->next_in_collection_set();
  2952     if (cl->doHeapRegion(cur) && false) {
  2953       cl->incomplete();
  2954       return;
  2956     cur = next;
  2960 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
  2961   // We're not using an iterator given that it will wrap around when
  2962   // it reaches the last region and this is not what we want here.
  2963   for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
  2964     HeapRegion* hr = region_at(index);
  2965     if (!hr->isHumongous()) {
  2966       return hr;
  2969   return NULL;
  2972 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2973   Space* res = heap_region_containing(addr);
  2974   return res;
  2977 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2978   Space* sp = space_containing(addr);
  2979   if (sp != NULL) {
  2980     return sp->block_start(addr);
  2982   return NULL;
  2985 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2986   Space* sp = space_containing(addr);
  2987   assert(sp != NULL, "block_size of address outside of heap");
  2988   return sp->block_size(addr);
  2991 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2992   Space* sp = space_containing(addr);
  2993   return sp->block_is_obj(addr);
  2996 bool G1CollectedHeap::supports_tlab_allocation() const {
  2997   return true;
  3000 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  3001   return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
  3004 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
  3005   return young_list()->eden_used_bytes();
  3008 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
  3009 // must be smaller than the humongous object limit.
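       // As an illustration only (assuming the usual G1 setting where the
       // humongous threshold is half a region): with 1M regions and 8-byte heap
       // words the threshold is 65536 words, so the maximum TLAB size works out
       // to just under 512K.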
  3010 size_t G1CollectedHeap::max_tlab_size() const {
  3011   return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
  3014 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  3015   // Return the remaining space in the cur alloc region, but not less than
  3016   // the min TLAB size.
  3018   // Also, this value can be at most the humongous object threshold,
  3019   // since we can't allow tlabs to grow big enough to accommodate
  3020   // humongous objects.
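         // For example (illustrative numbers only): with 40K free in the
         // current allocation region and a MinTLABSize of 2K, this returns 40K,
         // assuming max_tlab_size() is larger than that.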
  3022   HeapRegion* hr = _mutator_alloc_region.get();
  3023   size_t max_tlab = max_tlab_size() * wordSize;
  3024   if (hr == NULL) {
  3025     return max_tlab;
  3026   } else {
  3027     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  3031 size_t G1CollectedHeap::max_capacity() const {
  3032   return _g1_reserved.byte_size();
  3035 jlong G1CollectedHeap::millis_since_last_gc() {
  3036   // assert(false, "NYI");
  3037   return 0;
  3040 void G1CollectedHeap::prepare_for_verify() {
  3041   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  3042     ensure_parsability(false);
  3044   g1_rem_set()->prepare_for_verify();
  3047 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
  3048                                               VerifyOption vo) {
  3049   switch (vo) {
  3050   case VerifyOption_G1UsePrevMarking:
  3051     return hr->obj_allocated_since_prev_marking(obj);
  3052   case VerifyOption_G1UseNextMarking:
  3053     return hr->obj_allocated_since_next_marking(obj);
  3054   case VerifyOption_G1UseMarkWord:
  3055     return false;
  3056   default:
  3057     ShouldNotReachHere();
  3059   return false; // keep some compilers happy
  3062 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
  3063   switch (vo) {
  3064   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
  3065   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
  3066   case VerifyOption_G1UseMarkWord:    return NULL;
  3067   default:                            ShouldNotReachHere();
  3069   return NULL; // keep some compilers happy
  3072 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
  3073   switch (vo) {
  3074   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
  3075   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
  3076   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
  3077   default:                            ShouldNotReachHere();
  3079   return false; // keep some compilers happy
  3082 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
  3083   switch (vo) {
  3084   case VerifyOption_G1UsePrevMarking: return "PTAMS";
  3085   case VerifyOption_G1UseNextMarking: return "NTAMS";
  3086   case VerifyOption_G1UseMarkWord:    return "NONE";
  3087   default:                            ShouldNotReachHere();
  3089   return NULL; // keep some compilers happy
  3092 class VerifyRootsClosure: public OopClosure {
  3093 private:
  3094   G1CollectedHeap* _g1h;
  3095   VerifyOption     _vo;
  3096   bool             _failures;
  3097 public:
  3098   // _vo == UsePrevMarking -> use "prev" marking information,
  3099   // _vo == UseNextMarking -> use "next" marking information,
  3100   // _vo == UseMarkWord    -> use mark word from object header.
  3101   VerifyRootsClosure(VerifyOption vo) :
  3102     _g1h(G1CollectedHeap::heap()),
  3103     _vo(vo),
  3104     _failures(false) { }
  3106   bool failures() { return _failures; }
  3108   template <class T> void do_oop_nv(T* p) {
  3109     T heap_oop = oopDesc::load_heap_oop(p);
  3110     if (!oopDesc::is_null(heap_oop)) {
  3111       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  3112       if (_g1h->is_obj_dead_cond(obj, _vo)) {
  3113         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  3114                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  3115         if (_vo == VerifyOption_G1UseMarkWord) {
  3116           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
  3118         obj->print_on(gclog_or_tty);
  3119         _failures = true;
  3124   void do_oop(oop* p)       { do_oop_nv(p); }
  3125   void do_oop(narrowOop* p) { do_oop_nv(p); }
  3126 };
  3128 class G1VerifyCodeRootOopClosure: public OopClosure {
  3129   G1CollectedHeap* _g1h;
  3130   OopClosure* _root_cl;
  3131   nmethod* _nm;
  3132   VerifyOption _vo;
  3133   bool _failures;
  3135   template <class T> void do_oop_work(T* p) {
  3136     // First verify that this root is live
  3137     _root_cl->do_oop(p);
  3139     if (!G1VerifyHeapRegionCodeRoots) {
  3140       // We're not verifying the code roots attached to heap regions.
  3141       return;
  3144     // Don't check the code roots during marking verification in a full GC
  3145     if (_vo == VerifyOption_G1UseMarkWord) {
  3146       return;
  3149     // Now verify that the current nmethod (which contains p) is
  3150     // in the code root list of the heap region containing the
  3151     // object referenced by p.
  3153     T heap_oop = oopDesc::load_heap_oop(p);
  3154     if (!oopDesc::is_null(heap_oop)) {
  3155       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  3157       // Now fetch the region containing the object
  3158       HeapRegion* hr = _g1h->heap_region_containing(obj);
  3159       HeapRegionRemSet* hrrs = hr->rem_set();
  3160       // Verify that the strong code root list for this region
  3161       // contains the nmethod
  3162       if (!hrrs->strong_code_roots_list_contains(_nm)) {
  3163         gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
  3164                               "from nmethod "PTR_FORMAT" not in strong "
  3165                               "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
  3166                               p, _nm, hr->bottom(), hr->end());
  3167         _failures = true;
  3172 public:
  3173   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
  3174     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
  3176   void do_oop(oop* p) { do_oop_work(p); }
  3177   void do_oop(narrowOop* p) { do_oop_work(p); }
  3179   void set_nmethod(nmethod* nm) { _nm = nm; }
  3180   bool failures() { return _failures; }
  3181 };
  3183 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
  3184   G1VerifyCodeRootOopClosure* _oop_cl;
  3186 public:
  3187   G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
  3188     _oop_cl(oop_cl) {}
  3190   void do_code_blob(CodeBlob* cb) {
  3191     nmethod* nm = cb->as_nmethod_or_null();
  3192     if (nm != NULL) {
  3193       _oop_cl->set_nmethod(nm);
  3194       nm->oops_do(_oop_cl);
  3197 };
  3199 class YoungRefCounterClosure : public OopClosure {
  3200   G1CollectedHeap* _g1h;
  3201   int              _count;
  3202  public:
  3203   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
  3204   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
  3205   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  3207   int count() { return _count; }
  3208   void reset_count() { _count = 0; };
  3209 };
  3211 class VerifyKlassClosure: public KlassClosure {
  3212   YoungRefCounterClosure _young_ref_counter_closure;
  3213   OopClosure *_oop_closure;
  3214  public:
  3215   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
  3216   void do_klass(Klass* k) {
  3217     k->oops_do(_oop_closure);
  3219     _young_ref_counter_closure.reset_count();
  3220     k->oops_do(&_young_ref_counter_closure);
  3221     if (_young_ref_counter_closure.count() > 0) {
  3222       guarantee(k->has_modified_oops(), err_msg("Klass %p has young refs but is not dirty.", k));
  3225 };
  3227 class VerifyLivenessOopClosure: public OopClosure {
  3228   G1CollectedHeap* _g1h;
  3229   VerifyOption _vo;
  3230 public:
  3231   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
  3232     _g1h(g1h), _vo(vo)
  3233   { }
  3234   void do_oop(narrowOop *p) { do_oop_work(p); }
  3235   void do_oop(      oop *p) { do_oop_work(p); }
  3237   template <class T> void do_oop_work(T *p) {
  3238     oop obj = oopDesc::load_decode_heap_oop(p);
  3239     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
  3240               "Dead object referenced by a not dead object");
  3242 };
  3244 class VerifyObjsInRegionClosure: public ObjectClosure {
  3245 private:
  3246   G1CollectedHeap* _g1h;
  3247   size_t _live_bytes;
  3248   HeapRegion *_hr;
  3249   VerifyOption _vo;
  3250 public:
  3251   // _vo == UsePrevMarking -> use "prev" marking information,
  3252   // _vo == UseNextMarking -> use "next" marking information,
  3253   // _vo == UseMarkWord    -> use mark word from object header.
  3254   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
  3255     : _live_bytes(0), _hr(hr), _vo(vo) {
  3256     _g1h = G1CollectedHeap::heap();
  3258   void do_object(oop o) {
  3259     VerifyLivenessOopClosure isLive(_g1h, _vo);
  3260     assert(o != NULL, "Huh?");
  3261     if (!_g1h->is_obj_dead_cond(o, _vo)) {
  3262       // If the object is alive according to the mark word,
  3263       // then verify that the marking information agrees.
  3264       // Note we can't verify the contra-positive of the
  3265       // above: if the object is dead (according to the mark
  3266       // word), it may not be marked, or may have been marked
  3267       // but has since become dead, or may have been allocated
  3268       // since the last marking.
  3269       if (_vo == VerifyOption_G1UseMarkWord) {
  3270         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
  3273       o->oop_iterate_no_header(&isLive);
  3274       if (!_hr->obj_allocated_since_prev_marking(o)) {
  3275         size_t obj_size = o->size();    // Make sure we don't overflow
  3276         _live_bytes += (obj_size * HeapWordSize);
  3280   size_t live_bytes() { return _live_bytes; }
  3281 };
  3283 class PrintObjsInRegionClosure : public ObjectClosure {
  3284   HeapRegion *_hr;
  3285   G1CollectedHeap *_g1;
  3286 public:
  3287   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  3288     _g1 = G1CollectedHeap::heap();
  3289   }
  3291   void do_object(oop o) {
  3292     if (o != NULL) {
  3293       HeapWord *start = (HeapWord *) o;
  3294       size_t word_sz = o->size();
  3295       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  3296                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  3297                           (void*) o, word_sz,
  3298                           _g1->isMarkedPrev(o),
  3299                           _g1->isMarkedNext(o),
  3300                           _hr->obj_allocated_since_prev_marking(o));
  3301       HeapWord *end = start + word_sz;
  3302       HeapWord *cur;
  3303       int *val;
  3304       for (cur = start; cur < end; cur++) {
  3305         val = (int *) cur;
  3306         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  3310 };
  3312 class VerifyRegionClosure: public HeapRegionClosure {
  3313 private:
  3314   bool             _par;
  3315   VerifyOption     _vo;
  3316   bool             _failures;
  3317 public:
  3318   // _vo == UsePrevMarking -> use "prev" marking information,
  3319   // _vo == UseNextMarking -> use "next" marking information,
  3320   // _vo == UseMarkWord    -> use mark word from object header.
  3321   VerifyRegionClosure(bool par, VerifyOption vo)
  3322     : _par(par),
  3323       _vo(vo),
  3324       _failures(false) {}
  3326   bool failures() {
  3327     return _failures;
  3330   bool doHeapRegion(HeapRegion* r) {
  3331     if (!r->continuesHumongous()) {
  3332       bool failures = false;
  3333       r->verify(_vo, &failures);
  3334       if (failures) {
  3335         _failures = true;
  3336       } else {
  3337         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
  3338         r->object_iterate(&not_dead_yet_cl);
  3339         if (_vo != VerifyOption_G1UseNextMarking) {
  3340           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  3341             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  3342                                    "max_live_bytes "SIZE_FORMAT" "
  3343                                    "< calculated "SIZE_FORMAT,
  3344                                    r->bottom(), r->end(),
  3345                                    r->max_live_bytes(),
  3346                                  not_dead_yet_cl.live_bytes());
  3347             _failures = true;
  3349         } else {
  3350           // When vo == UseNextMarking we cannot currently do a sanity
  3351           // check on the live bytes as the calculation has not been
  3352           // finalized yet.
  3356     return false; // stop the region iteration if we hit a failure
  3358 };
  3360 // This is the task used for parallel verification of the heap regions
  3362 class G1ParVerifyTask: public AbstractGangTask {
  3363 private:
  3364   G1CollectedHeap* _g1h;
  3365   VerifyOption     _vo;
  3366   bool             _failures;
  3368 public:
  3369   // _vo == UsePrevMarking -> use "prev" marking information,
  3370   // _vo == UseNextMarking -> use "next" marking information,
  3371   // _vo == UseMarkWord    -> use mark word from object header.
  3372   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
  3373     AbstractGangTask("Parallel verify task"),
  3374     _g1h(g1h),
  3375     _vo(vo),
  3376     _failures(false) { }
  3378   bool failures() {
  3379     return _failures;
  3382   void work(uint worker_id) {
  3383     HandleMark hm;
  3384     VerifyRegionClosure blk(true, _vo);
  3385     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
  3386                                           _g1h->workers()->active_workers(),
  3387                                           HeapRegion::ParVerifyClaimValue);
  3388     if (blk.failures()) {
  3389       _failures = true;
  3392 };
  3394 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
  3395   if (SafepointSynchronize::is_at_safepoint()) {
  3396     assert(Thread::current()->is_VM_thread(),
  3397            "Expected to be executed serially by the VM thread at this point");
  3399     if (!silent) { gclog_or_tty->print("Roots "); }
  3400     VerifyRootsClosure rootsCl(vo);
  3401     VerifyKlassClosure klassCl(this, &rootsCl);
  3402     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
  3404     // We apply the relevant closures to all the oops in the
  3405     // system dictionary, class loader data graph, the string table
  3406     // and the nmethods in the code cache.
  3407     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
  3408     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
  3410     process_all_roots(true,            // activate StrongRootsScope
  3411                       SO_AllCodeCache, // roots scanning options
  3412                       &rootsCl,
  3413                       &cldCl,
  3414                       &blobsCl);
  3416     bool failures = rootsCl.failures() || codeRootsCl.failures();
  3418     if (vo != VerifyOption_G1UseMarkWord) {
  3419       // If we're verifying during a full GC then the region sets
  3420       // will have been torn down at the start of the GC. Therefore
  3421       // verifying the region sets will fail. So we only verify
  3422       // the region sets when not in a full GC.
  3423       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
  3424       verify_region_sets();
  3427     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  3428     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  3429       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3430              "sanity check");
  3432       G1ParVerifyTask task(this, vo);
  3433       assert(UseDynamicNumberOfGCThreads ||
  3434         workers()->active_workers() == workers()->total_workers(),
  3435         "If not dynamic should be using all the workers");
  3436       int n_workers = workers()->active_workers();
  3437       set_par_threads(n_workers);
  3438       workers()->run_task(&task);
  3439       set_par_threads(0);
  3440       if (task.failures()) {
  3441         failures = true;
  3444       // Checks that the expected amount of parallel work was done.
  3445       // The implication is that n_workers is > 0.
  3446       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  3447              "sanity check");
  3449       reset_heap_region_claim_values();
  3451       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3452              "sanity check");
  3453     } else {
  3454       VerifyRegionClosure blk(false, vo);
  3455       heap_region_iterate(&blk);
  3456       if (blk.failures()) {
  3457         failures = true;
  3460     if (!silent) gclog_or_tty->print("RemSet ");
  3461     rem_set()->verify();
  3463     if (G1StringDedup::is_enabled()) {
  3464       if (!silent) gclog_or_tty->print("StrDedup ");
  3465       G1StringDedup::verify();
  3468     if (failures) {
  3469       gclog_or_tty->print_cr("Heap:");
  3470       // It helps to have the per-region information in the output to
  3471       // help us track down what went wrong. This is why we call
  3472       // print_extended_on() instead of print_on().
  3473       print_extended_on(gclog_or_tty);
  3474       gclog_or_tty->cr();
  3475 #ifndef PRODUCT
  3476       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  3477         concurrent_mark()->print_reachable("at-verification-failure",
  3478                                            vo, false /* all */);
  3480 #endif
  3481       gclog_or_tty->flush();
  3483     guarantee(!failures, "there should not have been any failures");
  3484   } else {
  3485     if (!silent) {
  3486       gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
  3487       if (G1StringDedup::is_enabled()) {
  3488         gclog_or_tty->print(", StrDedup");
  3490       gclog_or_tty->print(") ");
  3495 void G1CollectedHeap::verify(bool silent) {
  3496   verify(silent, VerifyOption_G1UsePrevMarking);
  3499 double G1CollectedHeap::verify(bool guard, const char* msg) {
  3500   double verify_time_ms = 0.0;
  3502   if (guard && total_collections() >= VerifyGCStartAt) {
  3503     double verify_start = os::elapsedTime();
  3504     HandleMark hm;  // Discard invalid handles created during verification
  3505     prepare_for_verify();
  3506     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
  3507     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
  3510   return verify_time_ms;
  3513 void G1CollectedHeap::verify_before_gc() {
  3514   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
  3515   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
  3518 void G1CollectedHeap::verify_after_gc() {
  3519   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
  3520   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
  3523 class PrintRegionClosure: public HeapRegionClosure {
  3524   outputStream* _st;
  3525 public:
  3526   PrintRegionClosure(outputStream* st) : _st(st) {}
  3527   bool doHeapRegion(HeapRegion* r) {
  3528     r->print_on(_st);
  3529     return false;
  3531 };
  3533 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
  3534                                        const HeapRegion* hr,
  3535                                        const VerifyOption vo) const {
  3536   switch (vo) {
  3537   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  3538   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  3539   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  3540   default:                            ShouldNotReachHere();
  3542   return false; // keep some compilers happy
  3545 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
  3546                                        const VerifyOption vo) const {
  3547   switch (vo) {
  3548   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
  3549   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
  3550   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  3551   default:                            ShouldNotReachHere();
  3553   return false; // keep some compilers happy
  3556 void G1CollectedHeap::print_on(outputStream* st) const {
  3557   st->print(" %-20s", "garbage-first heap");
  3558   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  3559             capacity()/K, used_unlocked()/K);
  3560   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  3561             _g1_storage.low_boundary(),
  3562             _g1_storage.high(),
  3563             _g1_storage.high_boundary());
  3564   st->cr();
  3565   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
  3566   uint young_regions = _young_list->length();
  3567   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
  3568             (size_t) young_regions * HeapRegion::GrainBytes / K);
  3569   uint survivor_regions = g1_policy()->recorded_survivor_regions();
  3570   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
  3571             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
  3572   st->cr();
  3573   MetaspaceAux::print_on(st);
  3576 void G1CollectedHeap::print_extended_on(outputStream* st) const {
  3577   print_on(st);
  3579   // Print the per-region information.
  3580   st->cr();
  3581   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
  3582                "HS=humongous(starts), HC=humongous(continues), "
  3583                "CS=collection set, F=free, TS=gc time stamp, "
  3584                "PTAMS=previous top-at-mark-start, "
  3585                "NTAMS=next top-at-mark-start)");
  3586   PrintRegionClosure blk(st);
  3587   heap_region_iterate(&blk);
  3590 void G1CollectedHeap::print_on_error(outputStream* st) const {
  3591   this->CollectedHeap::print_on_error(st);
  3593   if (_cm != NULL) {
  3594     st->cr();
  3595     _cm->print_on_error(st);
  3599 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3600   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3601     workers()->print_worker_threads_on(st);
  3603   _cmThread->print_on(st);
  3604   st->cr();
  3605   _cm->print_worker_threads_on(st);
  3606   _cg1r->print_worker_threads_on(st);
  3607   if (G1StringDedup::is_enabled()) {
  3608     G1StringDedup::print_worker_threads_on(st);
  3612 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3613   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3614     workers()->threads_do(tc);
  3616   tc->do_thread(_cmThread);
  3617   _cg1r->threads_do(tc);
  3618   if (G1StringDedup::is_enabled()) {
  3619     G1StringDedup::threads_do(tc);
  3623 void G1CollectedHeap::print_tracing_info() const {
  3624   // We'll overload this to mean "trace GC pause statistics."
  3625   if (TraceGen0Time || TraceGen1Time) {
  3626     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  3627     // to that.
  3628     g1_policy()->print_tracing_info();
  3630   if (G1SummarizeRSetStats) {
  3631     g1_rem_set()->print_summary_info();
  3633   if (G1SummarizeConcMark) {
  3634     concurrent_mark()->print_summary_info();
  3636   g1_policy()->print_yg_surv_rate_info();
  3637   SpecializationStats::print();
  3640 #ifndef PRODUCT
  3641 // Helpful for debugging RSet issues.
  3643 class PrintRSetsClosure : public HeapRegionClosure {
  3644 private:
  3645   const char* _msg;
  3646   size_t _occupied_sum;
  3648 public:
  3649   bool doHeapRegion(HeapRegion* r) {
  3650     HeapRegionRemSet* hrrs = r->rem_set();
  3651     size_t occupied = hrrs->occupied();
  3652     _occupied_sum += occupied;
  3654     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
  3655                            HR_FORMAT_PARAMS(r));
  3656     if (occupied == 0) {
  3657       gclog_or_tty->print_cr("  RSet is empty");
  3658     } else {
  3659       hrrs->print();
  3661     gclog_or_tty->print_cr("----------");
  3662     return false;
  3665   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
  3666     gclog_or_tty->cr();
  3667     gclog_or_tty->print_cr("========================================");
  3668     gclog_or_tty->print_cr("%s", msg);
  3669     gclog_or_tty->cr();
  3672   ~PrintRSetsClosure() {
  3673     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
  3674     gclog_or_tty->print_cr("========================================");
  3675     gclog_or_tty->cr();
  3677 };
  3679 void G1CollectedHeap::print_cset_rsets() {
  3680   PrintRSetsClosure cl("Printing CSet RSets");
  3681   collection_set_iterate(&cl);
  3684 void G1CollectedHeap::print_all_rsets() {
  3685   PrintRSetsClosure cl("Printing All RSets");
  3686   heap_region_iterate(&cl);
  3688 #endif // PRODUCT
  3690 G1CollectedHeap* G1CollectedHeap::heap() {
  3691   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  3692          "not a garbage-first heap");
  3693   return _g1h;
  3696 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  3697   // always_do_update_barrier = false;
  3698   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  3699   // Fill TLAB's and such
  3700   accumulate_statistics_all_tlabs();
  3701   ensure_parsability(true);
  3703   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
  3704       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  3705     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
  3709 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  3711   if (G1SummarizeRSetStats &&
  3712       (G1SummarizeRSetStatsPeriod > 0) &&
  3713       // we are at the end of the GC; the total collection count has already been incremented.
  3714       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
  3715     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
  3718   // FIXME: what is this about?
  3719   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  3720   // is set.
  3721   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  3722                         "derived pointer present"));
  3723   // always_do_update_barrier = true;
  3725   resize_all_tlabs();
  3727   // We have just completed a GC. Update the soft reference
  3728   // policy with the new heap occupancy
  3729   Universe::update_heap_info_at_gc();
  3732 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  3733                                                unsigned int gc_count_before,
  3734                                                bool* succeeded,
  3735                                                GCCause::Cause gc_cause) {
  3736   assert_heap_not_locked_and_not_at_safepoint();
  3737   g1_policy()->record_stop_world_start();
  3738   VM_G1IncCollectionPause op(gc_count_before,
  3739                              word_size,
  3740                              false, /* should_initiate_conc_mark */
  3741                              g1_policy()->max_pause_time_ms(),
  3742                              gc_cause);
  3743   VMThread::execute(&op);
  3745   HeapWord* result = op.result();
  3746   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  3747   assert(result == NULL || ret_succeeded,
  3748          "the result should be NULL if the VM did not succeed");
  3749   *succeeded = ret_succeeded;
  3751   assert_heap_not_locked();
  3752   return result;
  3755 void
  3756 G1CollectedHeap::doConcurrentMark() {
  3757   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  3758   if (!_cmThread->in_progress()) {
  3759     _cmThread->set_started();
  3760     CGC_lock->notify();
  3764 size_t G1CollectedHeap::pending_card_num() {
  3765   size_t extra_cards = 0;
  3766   JavaThread *curr = Threads::first();
  3767   while (curr != NULL) {
  3768     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3769     extra_cards += dcq.size();
  3770     curr = curr->next();
  3772   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3773   size_t buffer_size = dcqs.buffer_size();
  3774   size_t buffer_num = dcqs.completed_buffers_num();
  3776   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  3777   // in bytes - not the number of 'entries'. We need to convert
  3778   // into a number of cards.
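         // For example (illustrative numbers only): with pointer-sized 8-byte
         // entries, a 2048-byte buffer corresponds to 256 card entries.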
  3779   return (buffer_size * buffer_num + extra_cards) / oopSize;
  3782 size_t G1CollectedHeap::cards_scanned() {
  3783   return g1_rem_set()->cardsScanned();
  3786 void
  3787 G1CollectedHeap::setup_surviving_young_words() {
  3788   assert(_surviving_young_words == NULL, "pre-condition");
  3789   uint array_length = g1_policy()->young_cset_region_length();
  3790   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  3791   if (_surviving_young_words == NULL) {
  3792     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
  3793                           "Not enough space for young surv words summary.");
  3795   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
  3796 #ifdef ASSERT
  3797   for (uint i = 0;  i < array_length; ++i) {
  3798     assert( _surviving_young_words[i] == 0, "memset above" );
  3800 #endif // ASSERT
  3803 void
  3804 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3805   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3806   uint array_length = g1_policy()->young_cset_region_length();
  3807   for (uint i = 0; i < array_length; ++i) {
  3808     _surviving_young_words[i] += surv_young_words[i];
  3812 void
  3813 G1CollectedHeap::cleanup_surviving_young_words() {
  3814   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3815   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
  3816   _surviving_young_words = NULL;
  3819 #ifdef ASSERT
  3820 class VerifyCSetClosure: public HeapRegionClosure {
  3821 public:
  3822   bool doHeapRegion(HeapRegion* hr) {
  3823     // Here we check that the CSet region's RSet is ready for parallel
  3824     // iteration. The fields that we'll verify are only manipulated
  3825     // when the region is part of a CSet and is collected. Afterwards,
  3826     // we reset these fields when we clear the region's RSet (when the
  3827     // region is freed) so they are ready when the region is
  3828     // re-allocated. The only exception to this is if there's an
  3829     // evacuation failure and instead of freeing the region we leave
  3830     // it in the heap. In that case, we reset these fields during
  3831     // evacuation failure handling.
  3832     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
  3834     // Here's a good place to add any other checks we'd like to
  3835     // perform on CSet regions.
  3836     return false;
  3838 };
  3839 #endif // ASSERT
  3841 #if TASKQUEUE_STATS
  3842 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3843   st->print_raw_cr("GC Task Stats");
  3844   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3845   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3848 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3849   print_taskqueue_stats_hdr(st);
  3851   TaskQueueStats totals;
  3852   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3853   for (int i = 0; i < n; ++i) {
  3854     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3855     totals += task_queue(i)->stats;
  3857   st->print_raw("tot "); totals.print(st); st->cr();
  3859   DEBUG_ONLY(totals.verify());
  3862 void G1CollectedHeap::reset_taskqueue_stats() {
  3863   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3864   for (int i = 0; i < n; ++i) {
  3865     task_queue(i)->stats.reset();
  3868 #endif // TASKQUEUE_STATS
  3870 void G1CollectedHeap::log_gc_header() {
  3871   if (!G1Log::fine()) {
  3872     return;
  3875   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
  3877   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
  3878     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
  3879     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
  3881   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
  3884 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
  3885   if (!G1Log::fine()) {
  3886     return;
  3889   if (G1Log::finer()) {
  3890     if (evacuation_failed()) {
  3891       gclog_or_tty->print(" (to-space exhausted)");
  3893     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  3894     g1_policy()->phase_times()->note_gc_end();
  3895     g1_policy()->phase_times()->print(pause_time_sec);
  3896     g1_policy()->print_detailed_heap_transition();
  3897   } else {
  3898     if (evacuation_failed()) {
  3899       gclog_or_tty->print("--");
  3901     g1_policy()->print_heap_transition();
  3902     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
  3904   gclog_or_tty->flush();
  3907 bool
  3908 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3909   assert_at_safepoint(true /* should_be_vm_thread */);
  3910   guarantee(!is_gc_active(), "collection is not reentrant");
  3912   if (GC_locker::check_active_before_gc()) {
  3913     return false;
  3916   _gc_timer_stw->register_gc_start();
  3918   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
  3920   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3921   ResourceMark rm;
  3923   print_heap_before_gc();
  3924   trace_heap_before_gc(_gc_tracer_stw);
  3926   verify_region_sets_optional();
  3927   verify_dirty_young_regions();
  3929   // This call will decide whether this pause is an initial-mark
  3930   // pause. If it is, during_initial_mark_pause() will return true
  3931   // for the duration of this pause.
  3932   g1_policy()->decide_on_conc_mark_initiation();
  3934   // We do not allow initial-mark to be piggy-backed on a mixed GC.
  3935   assert(!g1_policy()->during_initial_mark_pause() ||
  3936           g1_policy()->gcs_are_young(), "sanity");
  3938   // We also do not allow mixed GCs during marking.
  3939   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
  3941   // Record whether this pause is an initial mark. When the current
  3942   // thread has completed its logging output and it's safe to signal
  3943   // the CM thread, the flag's value in the policy will have been reset.
  3944   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
  3946   // Inner scope for scope based logging, timers, and stats collection
  3948     EvacuationInfo evacuation_info;
  3950     if (g1_policy()->during_initial_mark_pause()) {
  3951       // We are about to start a marking cycle, so we increment the
  3952       // full collection counter.
  3953       increment_old_marking_cycles_started();
  3954       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
  3957     _gc_tracer_stw->report_yc_type(yc_type());
  3959     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  3961     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  3962                                 workers()->active_workers() : 1);
  3963     double pause_start_sec = os::elapsedTime();
  3964     g1_policy()->phase_times()->note_gc_start(active_workers);
  3965     log_gc_header();
  3967     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  3968     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  3970     // If the secondary_free_list is not empty, append it to the
  3971     // free_list. No need to wait for the cleanup operation to finish;
  3972     // the region allocation code will check the secondary_free_list
  3973     // and wait if necessary. If the G1StressConcRegionFreeing flag is
  3974     // set, skip this step so that the region allocation code has to
  3975     // get entries from the secondary_free_list.
  3976     if (!G1StressConcRegionFreeing) {
  3977       append_secondary_free_list_if_not_empty_with_lock();
  3980     assert(check_young_list_well_formed(), "young list should be well formed");
  3981     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3982            "sanity check");
  3984     // Don't dynamically change the number of GC threads this early.  A value of
  3985     // 0 is used to indicate serial work.  When parallel work is done,
  3986     // it will be set.
  3988     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3989       IsGCActiveMark x;
  3991       gc_prologue(false);
  3992       increment_total_collections(false /* full gc */);
  3993       increment_gc_time_stamp();
  3995       verify_before_gc();
  3997       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3999       // Please see comment in g1CollectedHeap.hpp and
  4000       // G1CollectedHeap::ref_processing_init() to see how
  4001       // reference processing currently works in G1.
  4003       // Enable discovery in the STW reference processor
  4004       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
  4005                                             true /*verify_no_refs*/);
  4008         // We want to temporarily turn off discovery by the
  4009         // CM ref processor, if necessary, and turn it back on
  4010         // again later if we do. Using a scoped
  4011         // NoRefDiscovery object will do this.
  4012         NoRefDiscovery no_cm_discovery(ref_processor_cm());
  4014         // Forget the current alloc region (we might even choose it to be part
  4015         // of the collection set!).
  4016         release_mutator_alloc_region();
  4018         // We should call this after we retire the mutator alloc
  4019         // region(s) so that all the ALLOC / RETIRE events are generated
  4020         // before the start GC event.
  4021         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
  4023         // This timing is only used by the ergonomics to handle our pause target.
  4024         // It is unclear why this should not include the full pause. We will
  4025         // investigate this in CR 7178365.
  4026         //
  4027         // Preserving the old comment here if that helps the investigation:
  4028         //
  4029         // The elapsed time induced by the start time below deliberately elides
  4030         // the possible verification above.
  4031         double sample_start_time_sec = os::elapsedTime();
  4033 #if YOUNG_LIST_VERBOSE
  4034         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  4035         _young_list->print();
  4036         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  4037 #endif // YOUNG_LIST_VERBOSE
  4039         g1_policy()->record_collection_pause_start(sample_start_time_sec);
  4041         double scan_wait_start = os::elapsedTime();
  4042         // We have to wait until the CM threads finish scanning the
  4043         // root regions as it's the only way to ensure that all the
  4044         // objects on them have been correctly scanned before we start
  4045         // moving them during the GC.
  4046         bool waited = _cm->root_regions()->wait_until_scan_finished();
  4047         double wait_time_ms = 0.0;
  4048         if (waited) {
  4049           double scan_wait_end = os::elapsedTime();
  4050           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  4052         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
  4054 #if YOUNG_LIST_VERBOSE
  4055         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  4056         _young_list->print();
  4057 #endif // YOUNG_LIST_VERBOSE
  4059         if (g1_policy()->during_initial_mark_pause()) {
  4060           concurrent_mark()->checkpointRootsInitialPre();
  4063 #if YOUNG_LIST_VERBOSE
  4064         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  4065         _young_list->print();
  4066         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  4067 #endif // YOUNG_LIST_VERBOSE
  4069         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
  4071         _cm->note_start_of_gc();
  4072         // We should not verify the per-thread SATB buffers given that
  4073         // we have not filtered them yet (we'll do so during the
  4074         // GC). We also call this after finalize_cset() to
  4075         // ensure that the CSet has been finalized.
  4076         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  4077                                  true  /* verify_enqueued_buffers */,
  4078                                  false /* verify_thread_buffers */,
  4079                                  true  /* verify_fingers */);
  4081         if (_hr_printer.is_active()) {
  4082           HeapRegion* hr = g1_policy()->collection_set();
  4083           while (hr != NULL) {
  4084             G1HRPrinter::RegionType type;
  4085             if (!hr->is_young()) {
  4086               type = G1HRPrinter::Old;
  4087             } else if (hr->is_survivor()) {
  4088               type = G1HRPrinter::Survivor;
  4089             } else {
  4090               type = G1HRPrinter::Eden;
  4092             _hr_printer.cset(hr);
  4093             hr = hr->next_in_collection_set();
  4097 #ifdef ASSERT
  4098         VerifyCSetClosure cl;
  4099         collection_set_iterate(&cl);
  4100 #endif // ASSERT
  4102         setup_surviving_young_words();
  4104         // Initialize the GC alloc regions.
  4105         init_gc_alloc_regions(evacuation_info);
  4107         // Actually do the work...
  4108         evacuate_collection_set(evacuation_info);
   4110         // We do this mainly to verify the per-thread SATB buffers
  4111         // (which have been filtered by now) since we didn't verify
  4112         // them earlier. No point in re-checking the stacks / enqueued
  4113         // buffers given that the CSet has not changed since last time
  4114         // we checked.
  4115         _cm->verify_no_cset_oops(false /* verify_stacks */,
  4116                                  false /* verify_enqueued_buffers */,
  4117                                  true  /* verify_thread_buffers */,
  4118                                  true  /* verify_fingers */);
  4120         free_collection_set(g1_policy()->collection_set(), evacuation_info);
  4121         g1_policy()->clear_collection_set();
  4123         cleanup_surviving_young_words();
  4125         // Start a new incremental collection set for the next pause.
  4126         g1_policy()->start_incremental_cset_building();
  4128         clear_cset_fast_test();
  4130         _young_list->reset_sampled_info();
  4132         // Don't check the whole heap at this point as the
  4133         // GC alloc regions from this pause have been tagged
  4134         // as survivors and moved on to the survivor list.
  4135         // Survivor regions will fail the !is_young() check.
  4136         assert(check_young_list_empty(false /* check_heap */),
  4137           "young list should be empty");
  4139 #if YOUNG_LIST_VERBOSE
  4140         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  4141         _young_list->print();
  4142 #endif // YOUNG_LIST_VERBOSE
  4144         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  4145                                              _young_list->first_survivor_region(),
  4146                                              _young_list->last_survivor_region());
  4148         _young_list->reset_auxilary_lists();
  4150         if (evacuation_failed()) {
  4151           _summary_bytes_used = recalculate_used();
  4152           uint n_queues = MAX2((int)ParallelGCThreads, 1);
  4153           for (uint i = 0; i < n_queues; i++) {
  4154             if (_evacuation_failed_info_array[i].has_failed()) {
  4155               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
  4158         } else {
  4159           // The "used" of the the collection set have already been subtracted
  4160           // when they were freed.  Add in the bytes evacuated.
  4161           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
  4164         if (g1_policy()->during_initial_mark_pause()) {
  4165           // We have to do this before we notify the CM threads that
  4166           // they can start working to make sure that all the
  4167           // appropriate initialization is done on the CM object.
  4168           concurrent_mark()->checkpointRootsInitialPost();
  4169           set_marking_started();
  4170           // Note that we don't actually trigger the CM thread at
  4171           // this point. We do that later when we're sure that
  4172           // the current thread has completed its logging output.
  4175         allocate_dummy_regions();
  4177 #if YOUNG_LIST_VERBOSE
  4178         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  4179         _young_list->print();
  4180         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  4181 #endif // YOUNG_LIST_VERBOSE
  4183         init_mutator_alloc_region();
  4186           size_t expand_bytes = g1_policy()->expansion_amount();
  4187           if (expand_bytes > 0) {
  4188             size_t bytes_before = capacity();
  4189             // No need for an ergo verbose message here,
  4190             // expansion_amount() does this when it returns a value > 0.
  4191             if (!expand(expand_bytes)) {
  4192               // We failed to expand the heap so let's verify that
  4193               // committed/uncommitted amount match the backing store
  4194               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  4195               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  4200         // We redo the verification but now wrt to the new CSet which
  4201         // has just got initialized after the previous CSet was freed.
  4202         _cm->verify_no_cset_oops(true  /* verify_stacks */,
  4203                                  true  /* verify_enqueued_buffers */,
  4204                                  true  /* verify_thread_buffers */,
  4205                                  true  /* verify_fingers */);
  4206         _cm->note_end_of_gc();
  4208         // This timing is only used by the ergonomics to handle our pause target.
  4209         // It is unclear why this should not include the full pause. We will
  4210         // investigate this in CR 7178365.
  4211         double sample_end_time_sec = os::elapsedTime();
  4212         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
  4213         g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
  4215         MemoryService::track_memory_usage();
  4217         // In prepare_for_verify() below we'll need to scan the deferred
  4218         // update buffers to bring the RSets up-to-date if
  4219         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
  4220         // the update buffers we'll probably need to scan cards on the
  4221         // regions we just allocated to (i.e., the GC alloc
  4222         // regions). However, during the last GC we called
  4223         // set_saved_mark() on all the GC alloc regions, so card
  4224         // scanning might skip the [saved_mark_word()...top()] area of
  4225         // those regions (i.e., the area we allocated objects into
  4226         // during the last GC). But it shouldn't. Given that
  4227         // saved_mark_word() is conditional on whether the GC time stamp
  4228         // on the region is current or not, by incrementing the GC time
  4229         // stamp here we invalidate all the GC time stamps on all the
  4230         // regions and saved_mark_word() will simply return top() for
   4231         // all the regions. This is a nicer way of ensuring this
   4232         // than iterating over the regions and fixing them. In fact, the
  4233         // GC time stamp increment here also ensures that
  4234         // saved_mark_word() will return top() between pauses, i.e.,
  4235         // during concurrent refinement. So we don't need the
   4236         // is_gc_active() check to decide which top to use when
  4237         // scanning cards (see CR 7039627).
  4238         increment_gc_time_stamp();
  4240         verify_after_gc();
  4242         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  4243         ref_processor_stw()->verify_no_references_recorded();
  4245         // CM reference discovery will be re-enabled if necessary.
  4248       // We should do this after we potentially expand the heap so
  4249       // that all the COMMIT events are generated before the end GC
  4250       // event, and after we retire the GC alloc regions so that all
  4251       // RETIRE events are generated before the end GC event.
  4252       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
  4254       if (mark_in_progress()) {
  4255         concurrent_mark()->update_g1_committed();
  4258 #ifdef TRACESPINNING
  4259       ParallelTaskTerminator::print_termination_counts();
  4260 #endif
  4262       gc_epilogue(false);
  4265     // Print the remainder of the GC log output.
  4266     log_gc_footer(os::elapsedTime() - pause_start_sec);
   4268     // It is not yet safe to tell the concurrent mark to
  4269     // start as we have some optional output below. We don't want the
  4270     // output from the concurrent mark thread interfering with this
  4271     // logging output either.
  4273     _hrs.verify_optional();
  4274     verify_region_sets_optional();
  4276     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  4277     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  4279     print_heap_after_gc();
  4280     trace_heap_after_gc(_gc_tracer_stw);
  4282     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  4283     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  4284     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  4285     // before any GC notifications are raised.
  4286     g1mm()->update_sizes();
  4288     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
  4289     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
  4290     _gc_timer_stw->register_gc_end();
  4291     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
  4293   // It should now be safe to tell the concurrent mark thread to start
  4294   // without its logging output interfering with the logging output
  4295   // that came from the pause.
  4297   if (should_start_conc_mark) {
  4298     // CAUTION: after the doConcurrentMark() call below,
  4299     // the concurrent marking thread(s) could be running
  4300     // concurrently with us. Make sure that anything after
  4301     // this point does not assume that we are the only GC thread
  4302     // running. Note: of course, the actual marking work will
  4303     // not start until the safepoint itself is released in
  4304     // SuspendibleThreadSet::desynchronize().
  4305     doConcurrentMark();
  4308   return true;
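        // Returns the desired PLAB size, in HeapWords, for the given allocation
        // purpose, based on the PLAB statistics gathered so far for survivor and
        // old allocations, and capped as described below.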
  4311 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  4313   size_t gclab_word_size;
  4314   switch (purpose) {
  4315     case GCAllocForSurvived:
  4316       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
  4317       break;
  4318     case GCAllocForTenured:
  4319       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4320       break;
  4321     default:
  4322       assert(false, "unknown GCAllocPurpose");
  4323       gclab_word_size = _old_plab_stats.desired_plab_sz();
  4324       break;
  4327   // Prevent humongous PLAB sizes for two reasons:
   4328   // * PLABs are allocated using similar code paths as oops, but should
  4329   //   never be in a humongous region
  4330   // * Allowing humongous PLABs needlessly churns the region free lists
  4331   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
  4334 void G1CollectedHeap::init_mutator_alloc_region() {
  4335   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  4336   _mutator_alloc_region.init();
  4339 void G1CollectedHeap::release_mutator_alloc_region() {
  4340   _mutator_alloc_region.release();
  4341   assert(_mutator_alloc_region.get() == NULL, "post-condition");
  4344 void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
  4345   HeapRegion* retained_region = _retained_old_gc_alloc_region;
  4346   _retained_old_gc_alloc_region = NULL;
  4348   // We will discard the current GC alloc region if:
  4349   // a) it's in the collection set (it can happen!),
  4350   // b) it's already full (no point in using it),
  4351   // c) it's empty (this means that it was emptied during
  4352   // a cleanup and it should be on the free list now), or
  4353   // d) it's humongous (this means that it was emptied
  4354   // during a cleanup and was added to the free list, but
  4355   // has been subsequently used to allocate a humongous
  4356   // object that may be less than the region size).
  4357   if (retained_region != NULL &&
  4358       !retained_region->in_collection_set() &&
  4359       !(retained_region->top() == retained_region->end()) &&
  4360       !retained_region->is_empty() &&
  4361       !retained_region->isHumongous()) {
  4362     retained_region->record_top_and_timestamp();
  4363     // The retained region was added to the old region set when it was
   4364     // retired. We have to remove it now, since regions we are
   4365     // allocating into must not be in the region sets. We'll re-add it later, when
  4366     // it's retired again.
  4367     _old_set.remove(retained_region);
  4368     bool during_im = g1_policy()->during_initial_mark_pause();
  4369     retained_region->note_start_of_copying(during_im);
  4370     _old_gc_alloc_region.set(retained_region);
  4371     _hr_printer.reuse(retained_region);
  4372     evacuation_info.set_alloc_regions_used_before(retained_region->used());
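        // Set up the survivor and old GC alloc regions for the upcoming evacuation
        // pause, possibly reusing the old GC alloc region retained from the previous
        // pause (see use_retained_old_gc_alloc_region() above).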
  4376 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  4377   assert_at_safepoint(true /* should_be_vm_thread */);
  4379   _survivor_gc_alloc_region.init();
  4380   _old_gc_alloc_region.init();
  4382   use_retained_old_gc_alloc_region(evacuation_info);
  4385 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  4386   evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
  4387                                          _old_gc_alloc_region.count());
  4388   _survivor_gc_alloc_region.release();
  4389   // If we have an old GC alloc region to release, we'll save it in
   4390   // _retained_old_gc_alloc_region. If we don't,
  4391   // _retained_old_gc_alloc_region will become NULL. This is what we
  4392   // want either way so no reason to check explicitly for either
  4393   // condition.
  4394   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
  4396   if (ResizePLAB) {
  4397     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  4398     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  4402 void G1CollectedHeap::abandon_gc_alloc_regions() {
  4403   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
  4404   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
  4405   _retained_old_gc_alloc_region = NULL;
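        // Prepare the evacuation-failure machinery: install the closure used to scan
        // objects that could not be copied, and allocate the scan stack that
        // drain_evac_failure_scan_stack() works off.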
  4408 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  4409   _drain_in_progress = false;
  4410   set_evac_failure_closure(cl);
  4411   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  4414 void G1CollectedHeap::finalize_for_evac_failure() {
  4415   assert(_evac_failure_scan_stack != NULL &&
  4416          _evac_failure_scan_stack->length() == 0,
  4417          "Postcondition");
  4418   assert(!_drain_in_progress, "Postcondition");
  4419   delete _evac_failure_scan_stack;
  4420   _evac_failure_scan_stack = NULL;
  4423 void G1CollectedHeap::remove_self_forwarding_pointers() {
  4424   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4426   double remove_self_forwards_start = os::elapsedTime();
  4428   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
  4430   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4431     set_par_threads();
  4432     workers()->run_task(&rsfp_task);
  4433     set_par_threads(0);
  4434   } else {
  4435     rsfp_task.work(0);
  4438   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
  4440   // Reset the claim values in the regions in the collection set.
  4441   reset_cset_heap_region_claim_values();
  4443   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  4445   // Now restore saved marks, if any.
  4446   assert(_objs_with_preserved_marks.size() ==
  4447             _preserved_marks_of_objs.size(), "Both or none.");
  4448   while (!_objs_with_preserved_marks.is_empty()) {
  4449     oop obj = _objs_with_preserved_marks.pop();
  4450     markOop m = _preserved_marks_of_objs.pop();
  4451     obj->set_mark(m);
  4453   _objs_with_preserved_marks.clear(true);
  4454   _preserved_marks_of_objs.clear(true);
  4456   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
  4459 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  4460   _evac_failure_scan_stack->push(obj);
  4463 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  4464   assert(_evac_failure_scan_stack != NULL, "precondition");
  4466   while (_evac_failure_scan_stack->length() > 0) {
  4467      oop obj = _evac_failure_scan_stack->pop();
  4468      _evac_failure_closure->set_region(heap_region_containing(obj));
  4469      obj->oop_iterate_backwards(_evac_failure_closure);
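        // Called by a GC worker when copying an object out of the collection set
        // fails. The winner of the forward-to-self race keeps the object in place,
        // preserves its mark word if necessary and scans its fields via the
        // evacuation-failure scan stack; if another thread already installed a
        // forwarding pointer, that forwardee is returned instead.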
  4473 oop
  4474 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
  4475                                                oop old) {
  4476   assert(obj_in_cs(old),
  4477          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
  4478                  (HeapWord*) old));
  4479   markOop m = old->mark();
  4480   oop forward_ptr = old->forward_to_atomic(old);
  4481   if (forward_ptr == NULL) {
  4482     // Forward-to-self succeeded.
  4483     assert(_par_scan_state != NULL, "par scan state");
  4484     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4485     uint queue_num = _par_scan_state->queue_num();
  4487     _evacuation_failed = true;
  4488     _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
  4489     if (_evac_failure_closure != cl) {
  4490       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  4491       assert(!_drain_in_progress,
  4492              "Should only be true while someone holds the lock.");
  4493       // Set the global evac-failure closure to the current thread's.
  4494       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  4495       set_evac_failure_closure(cl);
  4496       // Now do the common part.
  4497       handle_evacuation_failure_common(old, m);
  4498       // Reset to NULL.
  4499       set_evac_failure_closure(NULL);
  4500     } else {
  4501       // The lock is already held, and this is recursive.
  4502       assert(_drain_in_progress, "This should only be the recursive case.");
  4503       handle_evacuation_failure_common(old, m);
  4505     return old;
  4506   } else {
  4507     // Forward-to-self failed. Either someone else managed to allocate
  4508     // space for this object (old != forward_ptr) or they beat us in
  4509     // self-forwarding it (old == forward_ptr).
  4510     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
  4511            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
  4512                    "should not be in the CSet",
  4513                    (HeapWord*) old, (HeapWord*) forward_ptr));
  4514     return forward_ptr;
  4518 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  4519   preserve_mark_if_necessary(old, m);
  4521   HeapRegion* r = heap_region_containing(old);
  4522   if (!r->evacuation_failed()) {
  4523     r->set_evacuation_failed(true);
  4524     _hr_printer.evac_failure(r);
  4527   push_on_evac_failure_scan_stack(old);
  4529   if (!_drain_in_progress) {
  4530     // prevent recursion in copy_to_survivor_space()
  4531     _drain_in_progress = true;
  4532     drain_evac_failure_scan_stack();
  4533     _drain_in_progress = false;
  4537 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  4538   assert(evacuation_failed(), "Oversaving!");
  4539   // We want to call the "for_promotion_failure" version only in the
  4540   // case of a promotion failure.
  4541   if (m->must_be_preserved_for_promotion_failure(obj)) {
  4542     _objs_with_preserved_marks.push(obj);
  4543     _preserved_marks_of_objs.push(m);
  4547 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4548                                                   size_t word_size) {
  4549   if (purpose == GCAllocForSurvived) {
  4550     HeapWord* result = survivor_attempt_allocation(word_size);
  4551     if (result != NULL) {
  4552       return result;
  4553     } else {
  4554       // Let's try to allocate in the old gen in case we can fit the
  4555       // object there.
  4556       return old_attempt_allocation(word_size);
  4558   } else {
   4559     assert(purpose == GCAllocForTenured, "sanity");
  4560     HeapWord* result = old_attempt_allocation(word_size);
  4561     if (result != NULL) {
  4562       return result;
  4563     } else {
  4564       // Let's try to allocate in the survivors in case we can fit the
  4565       // object there.
  4566       return survivor_attempt_allocation(word_size);
  4570   ShouldNotReachHere();
  4571   // Trying to keep some compilers happy.
  4572   return NULL;
  4575 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  4576   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
  4578 void G1ParCopyHelper::mark_object(oop obj) {
  4579 #ifdef ASSERT
  4580   HeapRegion* hr = _g1->heap_region_containing(obj);
  4581   assert(hr != NULL, "sanity");
  4582   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
  4583 #endif // ASSERT
  4585   // We know that the object is not moving so it's safe to read its size.
  4586   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
  4589 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  4590 #ifdef ASSERT
  4591   assert(from_obj->is_forwarded(), "from obj should be forwarded");
  4592   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  4593   assert(from_obj != to_obj, "should not be self-forwarded");
  4595   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
  4596   assert(from_hr != NULL, "sanity");
  4597   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
  4599   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
  4600   assert(to_hr != NULL, "sanity");
  4601   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
  4602 #endif // ASSERT
  4604   // The object might be in the process of being copied by another
  4605   // worker so we cannot trust that its to-space image is
  4606   // well-formed. So we have to read its size from its from-space
  4607   // image which we know should not be changing.
  4608   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
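        // Klass barrier: if the new location of the object is in the young gen,
        // record on the klass currently being scanned that it holds modified oops,
        // so that later young GCs will revisit it (see G1KlassScanClosure below).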
  4611 template <class T>
  4612 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  4613   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
  4614     _scanned_klass->record_modified_oops();
  4618 template <G1Barrier barrier, G1Mark do_mark_object>
  4619 template <class T>
  4620 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  4621   T heap_oop = oopDesc::load_heap_oop(p);
  4623   if (oopDesc::is_null(heap_oop)) {
  4624     return;
  4627   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  4629   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  4631   if (_g1->in_cset_fast_test(obj)) {
  4632     oop forwardee;
  4633     if (obj->is_forwarded()) {
  4634       forwardee = obj->forwardee();
  4635     } else {
  4636       forwardee = _par_scan_state->copy_to_survivor_space(obj);
  4638     assert(forwardee != NULL, "forwardee should not be NULL");
  4639     oopDesc::encode_store_heap_oop(p, forwardee);
  4640     if (do_mark_object != G1MarkNone && forwardee != obj) {
  4641       // If the object is self-forwarded we don't need to explicitly
  4642       // mark it, the evacuation failure protocol will do so.
  4643       mark_forwarded_object(obj, forwardee);
  4646     if (barrier == G1BarrierKlass) {
  4647       do_klass_barrier(p, forwardee);
  4649   } else {
  4650     // The object is not in collection set. If we're a root scanning
  4651     // closure during an initial mark pause then attempt to mark the object.
  4652     if (do_mark_object == G1MarkFromRoot) {
  4653       mark_object(obj);
  4657   if (barrier == G1BarrierEvac) {
  4658     _par_scan_state->update_rs(_from, p, _worker_id);
  4662 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
  4663 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
  4665 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4666 protected:
  4667   G1CollectedHeap*              _g1h;
  4668   G1ParScanThreadState*         _par_scan_state;
  4669   RefToScanQueueSet*            _queues;
  4670   ParallelTaskTerminator*       _terminator;
  4672   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4673   RefToScanQueueSet*      queues()         { return _queues; }
  4674   ParallelTaskTerminator* terminator()     { return _terminator; }
  4676 public:
  4677   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4678                                 G1ParScanThreadState* par_scan_state,
  4679                                 RefToScanQueueSet* queues,
  4680                                 ParallelTaskTerminator* terminator)
  4681     : _g1h(g1h), _par_scan_state(par_scan_state),
  4682       _queues(queues), _terminator(terminator) {}
  4684   void do_void();
  4686 private:
  4687   inline bool offer_termination();
  4688 };
  4690 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4691   G1ParScanThreadState* const pss = par_scan_state();
  4692   pss->start_term_time();
  4693   const bool res = terminator()->offer_termination();
  4694   pss->end_term_time();
  4695   return res;
  4698 void G1ParEvacuateFollowersClosure::do_void() {
  4699   G1ParScanThreadState* const pss = par_scan_state();
  4700   pss->trim_queue();
  4701   do {
  4702     pss->steal_and_trim_queue(queues());
  4703   } while (!offer_termination());
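        // Applies the copy closure to all oops held by klasses. When
        // _process_only_dirty is set, only klasses whose modified-oops flag is set
        // are scanned; the flag is cleared before scanning and re-set by the klass
        // barrier if the scan leaves oops pointing into the young gen.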
  4706 class G1KlassScanClosure : public KlassClosure {
  4707  G1ParCopyHelper* _closure;
  4708  bool             _process_only_dirty;
  4709  int              _count;
  4710  public:
  4711   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
  4712       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
  4713   void do_klass(Klass* klass) {
   4714     // If the klass has not been dirtied we know that there are
   4715     // no references into the young gen and we can skip it.
   4716     if (!_process_only_dirty || klass->has_modified_oops()) {
  4717       // Clean the klass since we're going to scavenge all the metadata.
  4718       klass->clear_modified_oops();
  4720       // Tell the closure that this klass is the Klass to scavenge
  4721       // and is the one to dirty if oops are left pointing into the young gen.
  4722       _closure->set_scanned_klass(klass);
  4724       klass->oops_do(_closure);
  4726       _closure->set_scanned_klass(NULL);
  4728     _count++;
  4730 };
  4732 class G1ParTask : public AbstractGangTask {
  4733 protected:
  4734   G1CollectedHeap*       _g1h;
  4735   RefToScanQueueSet      *_queues;
  4736   ParallelTaskTerminator _terminator;
  4737   uint _n_workers;
  4739   Mutex _stats_lock;
  4740   Mutex* stats_lock() { return &_stats_lock; }
  4742   size_t getNCards() {
  4743     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4744       / G1BlockOffsetSharedArray::N_bytes;
  4747 public:
  4748   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
  4749     : AbstractGangTask("G1 collection"),
  4750       _g1h(g1h),
  4751       _queues(task_queues),
  4752       _terminator(0, _queues),
  4753       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  4754   {}
  4756   RefToScanQueueSet* queues() { return _queues; }
  4758   RefToScanQueue *work_queue(int i) {
  4759     return queues()->queue(i);
  4762   ParallelTaskTerminator* terminator() { return &_terminator; }
  4764   virtual void set_for_termination(int active_workers) {
  4765     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
  4766     // in the young space (_par_seq_tasks) in the G1 heap
  4767     // for SequentialSubTasksDone.
  4768     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
  4769     // both of which need setting by set_n_termination().
  4770     _g1h->SharedHeap::set_n_termination(active_workers);
  4771     _g1h->set_n_termination(active_workers);
  4772     terminator()->reset_for_reuse(active_workers);
  4773     _n_workers = active_workers;
  4776   // Helps out with CLD processing.
  4777   //
  4778   // During InitialMark we need to:
  4779   // 1) Scavenge all CLDs for the young GC.
  4780   // 2) Mark all objects directly reachable from strong CLDs.
  4781   template <G1Mark do_mark_object>
  4782   class G1CLDClosure : public CLDClosure {
  4783     G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
  4784     G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
  4785     G1KlassScanClosure                                _klass_in_cld_closure;
  4786     bool                                              _claim;
  4788    public:
  4789     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
  4790                  bool only_young, bool claim)
  4791         : _oop_closure(oop_closure),
  4792           _oop_in_klass_closure(oop_closure->g1(),
  4793                                 oop_closure->pss(),
  4794                                 oop_closure->rp()),
  4795           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
  4796           _claim(claim) {
  4800     void do_cld(ClassLoaderData* cld) {
  4801       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
  4803   };
  4805   class G1CodeBlobClosure: public CodeBlobClosure {
  4806     OopClosure* _f;
  4808    public:
  4809     G1CodeBlobClosure(OopClosure* f) : _f(f) {}
  4810     void do_code_blob(CodeBlob* blob) {
  4811       nmethod* that = blob->as_nmethod_or_null();
  4812       if (that != NULL) {
  4813         if (!that->test_set_oops_do_mark()) {
  4814           that->oops_do(_f);
  4815           that->fix_oop_relocations();
  4819   };
  4821   void work(uint worker_id) {
  4822     if (worker_id >= _n_workers) return;  // no work needed this round
  4824     double start_time_ms = os::elapsedTime() * 1000.0;
  4825     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
  4828       ResourceMark rm;
  4829       HandleMark   hm;
  4831       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
  4833       G1ParScanThreadState            pss(_g1h, worker_id, rp);
  4834       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  4836       pss.set_evac_failure_closure(&evac_failure_cl);
  4838       bool only_young = _g1h->g1_policy()->gcs_are_young();
  4840       // Non-IM young GC.
  4841       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
  4842       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
  4843                                                                                only_young, // Only process dirty klasses.
  4844                                                                                false);     // No need to claim CLDs.
  4845       // IM young GC.
  4846       //    Strong roots closures.
  4847       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
  4848       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
  4849                                                                                false, // Process all klasses.
  4850                                                                                true); // Need to claim CLDs.
  4851       //    Weak roots closures.
  4852       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
  4853       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
  4854                                                                                     false, // Process all klasses.
  4855                                                                                     true); // Need to claim CLDs.
  4857       G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
  4858       G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
  4859       // IM Weak code roots are handled later.
  4861       OopClosure* strong_root_cl;
  4862       OopClosure* weak_root_cl;
  4863       CLDClosure* strong_cld_cl;
  4864       CLDClosure* weak_cld_cl;
  4865       CodeBlobClosure* strong_code_cl;
  4867       if (_g1h->g1_policy()->during_initial_mark_pause()) {
  4868         // We also need to mark copied objects.
  4869         strong_root_cl = &scan_mark_root_cl;
  4870         strong_cld_cl  = &scan_mark_cld_cl;
  4871         strong_code_cl = &scan_mark_code_cl;
  4872         if (ClassUnloadingWithConcurrentMark) {
  4873           weak_root_cl = &scan_mark_weak_root_cl;
  4874           weak_cld_cl  = &scan_mark_weak_cld_cl;
  4875         } else {
  4876           weak_root_cl = &scan_mark_root_cl;
  4877           weak_cld_cl  = &scan_mark_cld_cl;
  4879       } else {
  4880         strong_root_cl = &scan_only_root_cl;
  4881         weak_root_cl   = &scan_only_root_cl;
  4882         strong_cld_cl  = &scan_only_cld_cl;
  4883         weak_cld_cl    = &scan_only_cld_cl;
  4884         strong_code_cl = &scan_only_code_cl;
  4888       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
  4890       pss.start_strong_roots();
  4891       _g1h->g1_process_roots(strong_root_cl,
  4892                              weak_root_cl,
  4893                              &push_heap_rs_cl,
  4894                              strong_cld_cl,
  4895                              weak_cld_cl,
  4896                              strong_code_cl,
  4897                              worker_id);
  4899       pss.end_strong_roots();
  4902         double start = os::elapsedTime();
  4903         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4904         evac.do_void();
  4905         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4906         double term_ms = pss.term_time()*1000.0;
  4907         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
  4908         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
  4910       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4911       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  4913       if (ParallelGCVerbose) {
  4914         MutexLocker x(stats_lock());
  4915         pss.print_termination_stats(worker_id);
  4918       assert(pss.queue_is_empty(), "should be empty");
  4920       // Close the inner scope so that the ResourceMark and HandleMark
  4921       // destructors are executed here and are included as part of the
  4922       // "GC Worker Time".
  4925     double end_time_ms = os::elapsedTime() * 1000.0;
  4926     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
  4928 };
  4930 // *** Common G1 Evacuation Stuff
  4932 // This method is run in a GC worker.
  4934 void
  4935 G1CollectedHeap::
  4936 g1_process_roots(OopClosure* scan_non_heap_roots,
  4937                  OopClosure* scan_non_heap_weak_roots,
  4938                  OopsInHeapRegionClosure* scan_rs,
  4939                  CLDClosure* scan_strong_clds,
  4940                  CLDClosure* scan_weak_clds,
  4941                  CodeBlobClosure* scan_strong_code,
  4942                  uint worker_i) {
  4944   // First scan the shared roots.
  4945   double ext_roots_start = os::elapsedTime();
  4946   double closure_app_time_sec = 0.0;
  4948   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
  4949   bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
  4951   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  4952   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
  4954   process_roots(false, // no scoping; this is parallel code
  4955                 SharedHeap::SO_None,
  4956                 &buf_scan_non_heap_roots,
  4957                 &buf_scan_non_heap_weak_roots,
  4958                 scan_strong_clds,
   4960                 // Initial mark pauses that unload classes handle the weak CLDs separately.
  4960                 (trace_metadata ? NULL : scan_weak_clds),
  4961                 scan_strong_code);
  4963   // Now the CM ref_processor roots.
  4964   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4965     // We need to treat the discovered reference lists of the
  4966     // concurrent mark ref processor as roots and keep entries
  4967     // (which are added by the marking threads) on them live
  4968     // until they can be processed at the end of marking.
  4969     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  4972   if (trace_metadata) {
  4973     // Barrier to make sure all workers passed
  4974     // the strong CLD and strong nmethods phases.
  4975     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
  4977     // Now take the complement of the strong CLDs.
  4978     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
  4981   // Finish up any enqueued closure apps (attributed as object copy time).
  4982   buf_scan_non_heap_roots.done();
  4983   buf_scan_non_heap_weak_roots.done();
  4985   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
  4986       + buf_scan_non_heap_weak_roots.closure_app_seconds();
  4988   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  4990   double ext_root_time_ms =
  4991     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  4993   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  4995   // During conc marking we have to filter the per-thread SATB buffers
  4996   // to make sure we remove any oops into the CSet (which will show up
  4997   // as implicitly live).
  4998   double satb_filtering_ms = 0.0;
  4999   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
  5000     if (mark_in_progress()) {
  5001       double satb_filter_start = os::elapsedTime();
  5003       JavaThread::satb_mark_queue_set().filter_thread_buffers();
  5005       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
  5008   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
  5010   // Now scan the complement of the collection set.
  5011   MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
  5013   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
  5015   _process_strong_tasks->all_tasks_completed();
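        // Removes entries for dead Strings and Symbols from the interned string
        // table and the symbol table. When run by multiple workers, the work is
        // split between them using the tables' parallel claimed index.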
  5018 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
  5019 private:
  5020   BoolObjectClosure* _is_alive;
  5021   int _initial_string_table_size;
  5022   int _initial_symbol_table_size;
  5024   bool  _process_strings;
  5025   int _strings_processed;
  5026   int _strings_removed;
  5028   bool  _process_symbols;
  5029   int _symbols_processed;
  5030   int _symbols_removed;
  5032   bool _do_in_parallel;
  5033 public:
  5034   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
  5035     AbstractGangTask("String/Symbol Unlinking"),
  5036     _is_alive(is_alive),
  5037     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
  5038     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
  5039     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
  5041     _initial_string_table_size = StringTable::the_table()->table_size();
  5042     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
  5043     if (process_strings) {
  5044       StringTable::clear_parallel_claimed_index();
  5046     if (process_symbols) {
  5047       SymbolTable::clear_parallel_claimed_index();
  5051   ~G1StringSymbolTableUnlinkTask() {
  5052     guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
  5053               err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
  5054                       StringTable::parallel_claimed_index(), _initial_string_table_size));
  5055     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
  5056               err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
  5057                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
  5059     if (G1TraceStringSymbolTableScrubbing) {
  5060       gclog_or_tty->print_cr("Cleaned string and symbol table, "
  5061                              "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
  5062                              "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
  5063                              strings_processed(), strings_removed(),
  5064                              symbols_processed(), symbols_removed());
  5068   void work(uint worker_id) {
  5069     if (_do_in_parallel) {
  5070       int strings_processed = 0;
  5071       int strings_removed = 0;
  5072       int symbols_processed = 0;
  5073       int symbols_removed = 0;
  5074       if (_process_strings) {
  5075         StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
  5076         Atomic::add(strings_processed, &_strings_processed);
  5077         Atomic::add(strings_removed, &_strings_removed);
  5079       if (_process_symbols) {
  5080         SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
  5081         Atomic::add(symbols_processed, &_symbols_processed);
  5082         Atomic::add(symbols_removed, &_symbols_removed);
  5084     } else {
  5085       if (_process_strings) {
  5086         StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
  5088       if (_process_symbols) {
  5089         SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
  5094   size_t strings_processed() const { return (size_t)_strings_processed; }
  5095   size_t strings_removed()   const { return (size_t)_strings_removed; }
  5097   size_t symbols_processed() const { return (size_t)_symbols_processed; }
  5098   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
  5099 };
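        // Cleans and unloads nmethods in two passes. In the first pass workers claim
        // batches of nmethods and clean them; work that depends on knowing which
        // nmethods survive is put on a postponed list and picked up in the second
        // pass, once all workers have passed the barrier below.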
  5101 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
  5102 private:
  5103   static Monitor* _lock;
  5105   BoolObjectClosure* const _is_alive;
  5106   const bool               _unloading_occurred;
  5107   const uint               _num_workers;
  5109   // Variables used to claim nmethods.
  5110   nmethod* _first_nmethod;
  5111   volatile nmethod* _claimed_nmethod;
  5113   // The list of nmethods that need to be processed by the second pass.
  5114   volatile nmethod* _postponed_list;
  5115   volatile uint     _num_entered_barrier;
  5117  public:
  5118   G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
  5119       _is_alive(is_alive),
  5120       _unloading_occurred(unloading_occurred),
  5121       _num_workers(num_workers),
  5122       _first_nmethod(NULL),
  5123       _claimed_nmethod(NULL),
  5124       _postponed_list(NULL),
  5125       _num_entered_barrier(0)
  5127     nmethod::increase_unloading_clock();
  5128     _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
  5129     _claimed_nmethod = (volatile nmethod*)_first_nmethod;
  5132   ~G1CodeCacheUnloadingTask() {
  5133     CodeCache::verify_clean_inline_caches();
  5135     CodeCache::set_needs_cache_clean(false);
  5136     guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
  5138     CodeCache::verify_icholder_relocations();
  5141  private:
  5142   void add_to_postponed_list(nmethod* nm) {
  5143       nmethod* old;
  5144       do {
  5145         old = (nmethod*)_postponed_list;
  5146         nm->set_unloading_next(old);
  5147       } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
  5150   void clean_nmethod(nmethod* nm) {
  5151     bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
  5153     if (postponed) {
  5154       // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
  5155       add_to_postponed_list(nm);
   5158     // Mark that this nmethod has been cleaned/unloaded.
  5159     // After this call, it will be safe to ask if this nmethod was unloaded or not.
  5160     nm->set_unloading_clock(nmethod::global_unloading_clock());
  5163   void clean_nmethod_postponed(nmethod* nm) {
  5164     nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
  5167   static const int MaxClaimNmethods = 16;
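          // Lock-free claiming of up to MaxClaimNmethods alive nmethods at a time:
          // walk forward from the currently claimed nmethod and try to advance the
          // shared cursor with a CAS; retry if another worker got there first.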
  5169   void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
  5170     nmethod* first;
  5171     nmethod* last;
  5173     do {
  5174       *num_claimed_nmethods = 0;
  5176       first = last = (nmethod*)_claimed_nmethod;
  5178       if (first != NULL) {
  5179         for (int i = 0; i < MaxClaimNmethods; i++) {
  5180           last = CodeCache::alive_nmethod(CodeCache::next(last));
  5182           if (last == NULL) {
  5183             break;
  5186           claimed_nmethods[i] = last;
  5187           (*num_claimed_nmethods)++;
  5191     } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
  5194   nmethod* claim_postponed_nmethod() {
  5195     nmethod* claim;
  5196     nmethod* next;
  5198     do {
  5199       claim = (nmethod*)_postponed_list;
  5200       if (claim == NULL) {
  5201         return NULL;
  5204       next = claim->unloading_next();
  5206     } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
  5208     return claim;
  5211  public:
  5212   // Mark that we're done with the first pass of nmethod cleaning.
  5213   void barrier_mark(uint worker_id) {
  5214     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  5215     _num_entered_barrier++;
  5216     if (_num_entered_barrier == _num_workers) {
  5217       ml.notify_all();
  5221   // See if we have to wait for the other workers to
  5222   // finish their first-pass nmethod cleaning work.
  5223   void barrier_wait(uint worker_id) {
  5224     if (_num_entered_barrier < _num_workers) {
  5225       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  5226       while (_num_entered_barrier < _num_workers) {
  5227           ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
  5232   // Cleaning and unloading of nmethods. Some work has to be postponed
  5233   // to the second pass, when we know which nmethods survive.
  5234   void work_first_pass(uint worker_id) {
   5235     // The first nmethod is claimed by the first worker.
  5236     if (worker_id == 0 && _first_nmethod != NULL) {
  5237       clean_nmethod(_first_nmethod);
  5238       _first_nmethod = NULL;
  5241     int num_claimed_nmethods;
  5242     nmethod* claimed_nmethods[MaxClaimNmethods];
  5244     while (true) {
  5245       claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
  5247       if (num_claimed_nmethods == 0) {
  5248         break;
  5251       for (int i = 0; i < num_claimed_nmethods; i++) {
  5252         clean_nmethod(claimed_nmethods[i]);
  5257   void work_second_pass(uint worker_id) {
  5258     nmethod* nm;
  5259     // Take care of postponed nmethods.
  5260     while ((nm = claim_postponed_nmethod()) != NULL) {
  5261       clean_nmethod_postponed(nm);
  5264 };
  5266 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
  5268 class G1KlassCleaningTask : public StackObj {
  5269   BoolObjectClosure*                      _is_alive;
  5270   volatile jint                           _clean_klass_tree_claimed;
  5271   ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
  5273  public:
  5274   G1KlassCleaningTask(BoolObjectClosure* is_alive) :
  5275       _is_alive(is_alive),
  5276       _clean_klass_tree_claimed(0),
  5277       _klass_iterator() {
  5280  private:
  5281   bool claim_clean_klass_tree_task() {
  5282     if (_clean_klass_tree_claimed) {
  5283       return false;
  5286     return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
  5289   InstanceKlass* claim_next_klass() {
  5290     Klass* klass;
  5291     do {
  5292       klass =_klass_iterator.next_klass();
  5293     } while (klass != NULL && !klass->oop_is_instance());
  5295     return (InstanceKlass*)klass;
  5298 public:
  5300   void clean_klass(InstanceKlass* ik) {
  5301     ik->clean_implementors_list(_is_alive);
  5302     ik->clean_method_data(_is_alive);
  5304     // G1 specific cleanup work that has
  5305     // been moved here to be done in parallel.
  5306     ik->clean_dependent_nmethods();
  5309   void work() {
  5310     ResourceMark rm;
  5312     // One worker will clean the subklass/sibling klass tree.
  5313     if (claim_clean_klass_tree_task()) {
  5314       Klass::clean_subklass_tree(_is_alive);
   5317     // All workers will help clean the classes.
  5318     InstanceKlass* klass;
  5319     while ((klass = claim_next_klass()) != NULL) {
  5320       clean_klass(klass);
  5323 };
  5325 // To minimize the remark pause times, the tasks below are done in parallel.
  5326 class G1ParallelCleaningTask : public AbstractGangTask {
  5327 private:
  5328   G1StringSymbolTableUnlinkTask _string_symbol_task;
  5329   G1CodeCacheUnloadingTask      _code_cache_task;
  5330   G1KlassCleaningTask           _klass_cleaning_task;
  5332 public:
  5333   // The constructor is run in the VMThread.
  5334   G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
  5335       AbstractGangTask("Parallel Cleaning"),
  5336       _string_symbol_task(is_alive, process_strings, process_symbols),
  5337       _code_cache_task(num_workers, is_alive, unloading_occurred),
  5338       _klass_cleaning_task(is_alive) {
  5341   // The parallel work done by all worker threads.
  5342   void work(uint worker_id) {
  5343     // Do first pass of code cache cleaning.
  5344     _code_cache_task.work_first_pass(worker_id);
  5346     // Let the threads mark that the first pass is done.
  5347     _code_cache_task.barrier_mark(worker_id);
  5349     // Clean the Strings and Symbols.
  5350     _string_symbol_task.work(worker_id);
  5352     // Wait for all workers to finish the first code cache cleaning pass.
  5353     _code_cache_task.barrier_wait(worker_id);
   5355     // Do the second code cache cleaning work, which relies on
  5356     // the liveness information gathered during the first pass.
  5357     _code_cache_task.work_second_pass(worker_id);
  5359     // Clean all klasses that were not unloaded.
  5360     _klass_cleaning_task.work();
  5362 };
  5365 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
  5366                                         bool process_strings,
  5367                                         bool process_symbols,
  5368                                         bool class_unloading_occurred) {
  5369   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  5370                     workers()->active_workers() : 1);
  5372   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
  5373                                         n_workers, class_unloading_occurred);
  5374   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5375     set_par_threads(n_workers);
  5376     workers()->run_task(&g1_unlink_task);
  5377     set_par_threads(0);
  5378   } else {
  5379     g1_unlink_task.work(0);
  5383 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
  5384                                                      bool process_strings, bool process_symbols) {
  5386     uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  5387                      _g1h->workers()->active_workers() : 1);
  5388     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
  5389     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5390       set_par_threads(n_workers);
  5391       workers()->run_task(&g1_unlink_task);
  5392       set_par_threads(0);
  5393     } else {
  5394       g1_unlink_task.work(0);
  5398   if (G1StringDedup::is_enabled()) {
  5399     G1StringDedup::unlink(is_alive);
  5403 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
  5404  private:
  5405   DirtyCardQueueSet* _queue;
  5406  public:
  5407   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
  5409   virtual void work(uint worker_id) {
  5410     double start_time = os::elapsedTime();
  5412     RedirtyLoggedCardTableEntryClosure cl;
  5413     if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
  5414       _queue->par_apply_closure_to_all_completed_buffers(&cl);
  5415     } else {
  5416       _queue->apply_closure_to_all_completed_buffers(&cl);
  5419     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
  5420     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
  5421     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
  5423 };
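        // Re-dirties the card table entries recorded in the deferred update buffers
        // during the pause and then merges those buffers into
        // JavaThread::dirty_card_queue_set() so they are processed by concurrent
        // refinement (or by the next pause).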
  5425 void G1CollectedHeap::redirty_logged_cards() {
  5426   guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
  5427   double redirty_logged_cards_start = os::elapsedTime();
  5429   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  5430                    _g1h->workers()->active_workers() : 1);
  5432   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
  5433   dirty_card_queue_set().reset_for_par_iteration();
  5434   if (use_parallel_gc_threads()) {
  5435     set_par_threads(n_workers);
  5436     workers()->run_task(&redirty_task);
  5437     set_par_threads(0);
  5438   } else {
  5439     redirty_task.work(0);
  5442   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  5443   dcq.merge_bufferlists(&dirty_card_queue_set());
  5444   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  5446   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
  5449 // Weak Reference Processing support
  5451 // An always "is_alive" closure that is used to preserve referents.
  5452 // If the object is non-null then it's alive.  Used in the preservation
  5453 // of referent objects that are pointed to by reference objects
  5454 // discovered by the CM ref processor.
  5455 class G1AlwaysAliveClosure: public BoolObjectClosure {
  5456   G1CollectedHeap* _g1;
  5457 public:
  5458   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  5459   bool do_object_b(oop p) {
  5460     if (p != NULL) {
  5461       return true;
  5463     return false;
  5465 };
  5467 bool G1STWIsAliveClosure::do_object_b(oop p) {
  5468   // An object is reachable if it is outside the collection set,
  5469   // or is inside and copied.
  5470   return !_g1->obj_in_cs(p) || p->is_forwarded();
  5473 // Non Copying Keep Alive closure
  5474 class G1KeepAliveClosure: public OopClosure {
  5475   G1CollectedHeap* _g1;
  5476 public:
  5477   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
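         // Only full-width oop roots (e.g. JNI weak handles) are expected here,
         // so the narrowOop variant should never be invoked.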
  5478   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  5479   void do_oop(      oop* p) {
  5480     oop obj = *p;
  5482     if (_g1->obj_in_cs(obj)) {
  5483       assert( obj->is_forwarded(), "invariant" );
  5484       *p = obj->forwardee();
  5487 };
  5489 // Copying Keep Alive closure - can be called from both
  5490 // serial and parallel code as long as different worker
  5491 // threads utilize different G1ParScanThreadState instances
  5492 // and different queues.
  5494 class G1CopyingKeepAliveClosure: public OopClosure {
  5495   G1CollectedHeap*         _g1h;
  5496   OopClosure*              _copy_non_heap_obj_cl;
  5497   G1ParScanThreadState*    _par_scan_state;
  5499 public:
  5500   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
  5501                             OopClosure* non_heap_obj_cl,
  5502                             G1ParScanThreadState* pss):
  5503     _g1h(g1h),
  5504     _copy_non_heap_obj_cl(non_heap_obj_cl),
  5505     _par_scan_state(pss)
  5506   {}
  5508   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  5509   virtual void do_oop(      oop* p) { do_oop_work(p); }
  5511   template <class T> void do_oop_work(T* p) {
  5512     oop obj = oopDesc::load_decode_heap_oop(p);
  5514     if (_g1h->obj_in_cs(obj)) {
  5515       // If the referent object has been forwarded (either copied
  5516       // to a new location or to itself in the event of an
  5517       // evacuation failure) then we need to update the reference
  5518       // field and, if both reference and referent are in the G1
  5519       // heap, update the RSet for the referent.
  5520       //
  5521       // If the referent has not been forwarded then we have to keep
  5522       // it alive by policy. Therefore we have to copy the referent.
  5523       //
  5524       // If the reference field is in the G1 heap then we can push
  5525       // on the PSS queue. When the queue is drained (after each
  5526       // phase of reference processing) the object and its followers
  5527       // will be copied, the reference field set to point to the
  5528       // new location, and the RSet updated. Otherwise we need to
  5529       // use the non-heap or metadata closures directly to copy
  5530       // the referent object and update the pointer, while avoiding
  5531       // updating the RSet.
  5533       if (_g1h->is_in_g1_reserved(p)) {
  5534         _par_scan_state->push_on_queue(p);
  5535       } else {
  5536         assert(!Metaspace::contains((const void*)p),
  5537                err_msg("Unexpectedly found a pointer from metadata: "
  5538                               PTR_FORMAT, p));
  5539           _copy_non_heap_obj_cl->do_oop(p);
  5543 };
  5545 // Serial drain queue closure. Called as the 'complete_gc'
  5546 // closure for each discovered list in some of the
  5547 // reference processing phases.
  5549 class G1STWDrainQueueClosure: public VoidClosure {
  5550 protected:
  5551   G1CollectedHeap* _g1h;
  5552   G1ParScanThreadState* _par_scan_state;
  5554   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  5556 public:
  5557   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
  5558     _g1h(g1h),
  5559     _par_scan_state(pss)
  5560   { }
  5562   void do_void() {
  5563     G1ParScanThreadState* const pss = par_scan_state();
  5564     pss->trim_queue();
  5566 };
  5568 // Parallel Reference Processing closures
  5570 // Implementation of AbstractRefProcTaskExecutor for parallel reference
  5571 // processing during G1 evacuation pauses.
  5573 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  5574 private:
  5575   G1CollectedHeap*   _g1h;
  5576   RefToScanQueueSet* _queues;
  5577   FlexibleWorkGang*  _workers;
  5578   int                _active_workers;
  5580 public:
  5581   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
  5582                         FlexibleWorkGang* workers,
  5583                         RefToScanQueueSet *task_queues,
  5584                         int n_workers) :
  5585     _g1h(g1h),
  5586     _queues(task_queues),
  5587     _workers(workers),
  5588     _active_workers(n_workers)
  5590     assert(n_workers > 0, "shouldn't call this otherwise");
  5593   // Executes the given task using concurrent marking worker threads.
  5594   virtual void execute(ProcessTask& task);
  5595   virtual void execute(EnqueueTask& task);
  5596 };
  5598 // Gang task for possibly parallel reference processing
  5600 class G1STWRefProcTaskProxy: public AbstractGangTask {
  5601   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  5602   ProcessTask&     _proc_task;
  5603   G1CollectedHeap* _g1h;
  5604   RefToScanQueueSet *_task_queues;
  5605   ParallelTaskTerminator* _terminator;
  5607 public:
  5608   G1STWRefProcTaskProxy(ProcessTask& proc_task,
  5609                      G1CollectedHeap* g1h,
  5610                      RefToScanQueueSet *task_queues,
  5611                      ParallelTaskTerminator* terminator) :
  5612     AbstractGangTask("Process reference objects in parallel"),
  5613     _proc_task(proc_task),
  5614     _g1h(g1h),
  5615     _task_queues(task_queues),
  5616     _terminator(terminator)
  5617   {}
  5619   virtual void work(uint worker_id) {
  5620     // The reference processing task executed by a single worker.
  5621     ResourceMark rm;
  5622     HandleMark   hm;
  5624     G1STWIsAliveClosure is_alive(_g1h);
  5626     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
  5627     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5629     pss.set_evac_failure_closure(&evac_failure_cl);
  5631     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5633     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5635     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5637     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5638       // We also need to mark copied objects.
  5639       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5642     // Keep alive closure.
  5643     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
  5645     // Complete GC closure
  5646     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
  5648     // Call the reference processing task's work routine.
  5649     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
  5651     // Note we cannot assert that the refs array is empty here as not all
  5652     // of the processing tasks (specifically phase2 - pp2_work) execute
  5653     // the complete_gc closure (which ordinarily would drain the queue) so
  5654     // the queue may not be empty.
  5656 };
  5658 // Driver routine for parallel reference processing.
  5659 // Creates an instance of the ref processing gang
  5660 // task and has the worker threads execute it.
  5661 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  5662   assert(_workers != NULL, "Need parallel worker threads.");
  5664   ParallelTaskTerminator terminator(_active_workers, _queues);
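         // A single terminator shared by all workers coordinates termination
         // across the task queues once no more work can be stolen.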
  5665   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
  5667   _g1h->set_par_threads(_active_workers);
  5668   _workers->run_task(&proc_task_proxy);
  5669   _g1h->set_par_threads(0);
  5672 // Gang task for parallel reference enqueueing.
  5674 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
  5675   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  5676   EnqueueTask& _enq_task;
  5678 public:
  5679   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  5680     AbstractGangTask("Enqueue reference objects in parallel"),
  5681     _enq_task(enq_task)
  5682   { }
  5684   virtual void work(uint worker_id) {
  5685     _enq_task.work(worker_id);
  5687 };
  5689 // Driver routine for parallel reference enqueueing.
  5690 // Creates an instance of the ref enqueueing gang
  5691 // task and has the worker threads execute it.
  5693 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  5694   assert(_workers != NULL, "Need parallel worker threads.");
  5696   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
  5698   _g1h->set_par_threads(_active_workers);
  5699   _workers->run_task(&enq_task_proxy);
  5700   _g1h->set_par_threads(0);
  5703 // End of weak reference support closures
  5705 // Abstract task used to preserve (i.e. copy) any referent objects
  5706 // that are in the collection set and are pointed to by reference
  5707 // objects discovered by the CM ref processor.
  5709 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
  5710 protected:
  5711   G1CollectedHeap* _g1h;
  5712   RefToScanQueueSet      *_queues;
  5713   ParallelTaskTerminator _terminator;
  5714   uint _n_workers;
  5716 public:
  5717   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
  5718     AbstractGangTask("ParPreserveCMReferents"),
  5719     _g1h(g1h),
  5720     _queues(task_queues),
  5721     _terminator(workers, _queues),
  5722     _n_workers(workers)
  5723   { }
  5725   void work(uint worker_id) {
  5726     ResourceMark rm;
  5727     HandleMark   hm;
  5729     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
  5730     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
  5732     pss.set_evac_failure_closure(&evac_failure_cl);
  5734     assert(pss.queue_is_empty(), "both queue and overflow should be empty");
  5736     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
  5738     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  5740     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5742     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5743       // We also need to mark copied objects.
  5744       copy_non_heap_cl = &copy_mark_non_heap_cl;
  5747     // Is alive closure
  5748     G1AlwaysAliveClosure always_alive(_g1h);
  5750     // Copying keep alive closure. Applied to referent objects that need
  5751     // to be copied.
  5752     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
  5754     ReferenceProcessor* rp = _g1h->ref_processor_cm();
  5756     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
  5757     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
  5759     // limit is set using max_num_q() - which was set using ParallelGCThreads.
  5760     // So this must be true - but assert just in case someone decides to
  5761     // change the worker ids.
  5762     assert(0 <= worker_id && worker_id < limit, "sanity");
  5763     assert(!rp->discovery_is_atomic(), "check this code");
  5765     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
  5766     for (uint idx = worker_id; idx < limit; idx += stride) {
  5767       DiscoveredList& ref_list = rp->discovered_refs()[idx];
  5769       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
  5770       while (iter.has_next()) {
  5771         // Since discovery is not atomic for the CM ref processor, we
  5772         // can see some null referent objects.
  5773         iter.load_ptrs(DEBUG_ONLY(true));
  5774         oop ref = iter.obj();
  5776         // This will filter nulls.
  5777         if (iter.is_referent_alive()) {
  5778           iter.make_referent_alive();
  5780         iter.move_to_next();
  5784     // Drain the queue - which may cause stealing
  5785     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
  5786     drain_queue.do_void();
  5787     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
  5788     assert(pss.queue_is_empty(), "should be");
  5790 };
  5792 // Weak Reference processing during an evacuation pause (part 1).
  5793 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
  5794   double ref_proc_start = os::elapsedTime();
  5796   ReferenceProcessor* rp = _ref_processor_stw;
  5797   assert(rp->discovery_enabled(), "should have been enabled");
  5799   // Any reference objects, in the collection set, that were 'discovered'
  5800   // by the CM ref processor should have already been copied (either by
  5801   // applying the external root copy closure to the discovered lists, or
  5802   // by following an RSet entry).
  5803   //
  5804   // But some of the referents in the collection set that these
  5805   // reference objects point to may not have been copied: the STW ref
  5806   // processor would have seen that the reference object had already
  5807   // been 'discovered' and would have skipped discovering the reference,
  5808   // but would not have treated the reference object as a regular oop.
  5809   // As a result the copy closure would not have been applied to the
  5810   // referent object.
  5811   //
  5812   // We need to explicitly copy these referent objects - the references
  5813   // will be processed at the end of remarking.
  5814   //
  5815   // We also need to do this copying before we process the reference
  5816   // objects discovered by the STW ref processor in case one of these
  5817   // referents points to another object which is also referenced by an
  5818   // object discovered by the STW ref processor.
  5820   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
  5821            no_of_gc_workers == workers()->active_workers(),
  5822            "Need to reset active GC workers");
  5824   set_par_threads(no_of_gc_workers);
  5825   G1ParPreserveCMReferentsTask keep_cm_referents(this,
  5826                                                  no_of_gc_workers,
  5827                                                  _task_queues);
  5829   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5830     workers()->run_task(&keep_cm_referents);
  5831   } else {
  5832     keep_cm_referents.work(0);
  5835   set_par_threads(0);
  5837   // Closure to test whether a referent is alive.
  5838   G1STWIsAliveClosure is_alive(this);
  5840   // Even when parallel reference processing is enabled, the processing
  5841   // of JNI refs is always performed serially by the current thread
  5842   // rather than by a worker. The following PSS will be used for processing
  5843   // JNI refs.
  5845   // Use only a single queue for this PSS.
  5846   G1ParScanThreadState            pss(this, 0, NULL);
  5848   // We do not embed a reference processor in the copying/scanning
  5849   // closures while we're actually processing the discovered
  5850   // reference objects.
  5851   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
  5853   pss.set_evac_failure_closure(&evac_failure_cl);
  5855   assert(pss.queue_is_empty(), "pre-condition");
  5857   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
  5859   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  5861   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
  5863   if (_g1h->g1_policy()->during_initial_mark_pause()) {
  5864     // We also need to mark copied objects.
  5865     copy_non_heap_cl = &copy_mark_non_heap_cl;
  5868   // Keep alive closure.
  5869   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
  5871   // Serial Complete GC closure
  5872   G1STWDrainQueueClosure drain_queue(this, &pss);
  5874   // Setup the soft refs policy...
  5875   rp->setup_policy(false);
  5877   ReferenceProcessorStats stats;
  5878   if (!rp->processing_is_mt()) {
  5879     // Serial reference processing...
  5880     stats = rp->process_discovered_references(&is_alive,
  5881                                               &keep_alive,
  5882                                               &drain_queue,
  5883                                               NULL,
  5884                                               _gc_timer_stw,
  5885                                               _gc_tracer_stw->gc_id());
  5886   } else {
  5887     // Parallel reference processing
  5888     assert(rp->num_q() == no_of_gc_workers, "sanity");
  5889     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  5891     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  5892     stats = rp->process_discovered_references(&is_alive,
  5893                                               &keep_alive,
  5894                                               &drain_queue,
  5895                                               &par_task_executor,
  5896                                               _gc_timer_stw,
  5897                                               _gc_tracer_stw->gc_id());
  5900   _gc_tracer_stw->report_gc_reference_stats(stats);
  5902   // We have completed copying any necessary live referent objects.
  5903   assert(pss.queue_is_empty(), "both queue and overflow should be empty");
  5905   double ref_proc_time = os::elapsedTime() - ref_proc_start;
  5906   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
  5909 // Weak Reference processing during an evacuation pause (part 2).
  5910 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
  5911   double ref_enq_start = os::elapsedTime();
  5913   ReferenceProcessor* rp = _ref_processor_stw;
  5914   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
  5916   // Now enqueue any remaining on the discovered lists on to
  5917   // the pending list.
  5918   if (!rp->processing_is_mt()) {
  5919     // Serial reference processing...
  5920     rp->enqueue_discovered_references();
  5921   } else {
  5922     // Parallel reference enqueueing
  5924     assert(no_of_gc_workers == workers()->active_workers(),
  5925            "Need to reset active workers");
  5926     assert(rp->num_q() == no_of_gc_workers, "sanity");
  5927     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  5929     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  5930     rp->enqueue_discovered_references(&par_task_executor);
  5933   rp->verify_no_references_recorded();
  5934   assert(!rp->discovery_enabled(), "should have been disabled");
  5936   // FIXME
  5937   // CM's reference processing also cleans up the string and symbol tables.
  5938   // Should we do that here also? We could, but it is a serial operation
  5939   // and could significantly increase the pause time.
  5941   double ref_enq_time = os::elapsedTime() - ref_enq_start;
  5942   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
  5945 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
  5946   _expand_heap_after_alloc_failure = true;
  5947   _evacuation_failed = false;
  5949   // Should G1EvacuationFailureALot be in effect for this GC?
  5950   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
  5952   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  5954   // Disable the hot card cache.
  5955   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  5956   hot_card_cache->reset_hot_cache_claimed_index();
  5957   hot_card_cache->set_use_cache(false);
  5959   uint n_workers;
  5960   if (G1CollectedHeap::use_parallel_gc_threads()) {
  5961     n_workers =
  5962       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  5963                                      workers()->active_workers(),
  5964                                      Threads::number_of_non_daemon_threads());
  5965     assert(UseDynamicNumberOfGCThreads ||
  5966            n_workers == workers()->total_workers(),
  5967            "If not dynamic should be using all the workers");
  5968     workers()->set_active_workers(n_workers);
  5969     set_par_threads(n_workers);
  5970   } else {
  5971     assert(n_par_threads() == 0,
  5972            "Should be the original non-parallel value");
  5973     n_workers = 1;
  5976   G1ParTask g1_par_task(this, _task_queues);
  5978   init_for_evac_failure(NULL);
  5980   rem_set()->prepare_for_younger_refs_iterate(true);
  5982   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  5983   double start_par_time_sec = os::elapsedTime();
  5984   double end_par_time_sec;
  5987     StrongRootsScope srs(this);
  5988     // InitialMark needs claim bits to keep track of the marked-through CLDs.
  5989     if (g1_policy()->during_initial_mark_pause()) {
  5990       ClassLoaderDataGraph::clear_claimed_marks();
  5993     if (G1CollectedHeap::use_parallel_gc_threads()) {
  5994       // The individual threads will set their evac-failure closures.
  5995       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  5996       // These tasks use SharedHeap::_process_strong_tasks
  5997       assert(UseDynamicNumberOfGCThreads ||
  5998              workers()->active_workers() == workers()->total_workers(),
  5999              "If not dynamic should be using all the workers");
  6000       workers()->run_task(&g1_par_task);
  6001     } else {
  6002       g1_par_task.set_for_termination(n_workers);
  6003       g1_par_task.work(0);
  6005     end_par_time_sec = os::elapsedTime();
  6007     // Closing the inner scope will execute the destructor
  6008     // for the StrongRootsScope object. We record the current
  6009     // elapsed time before closing the scope so that time
  6010     // taken for the SRS destructor is NOT included in the
  6011     // reported parallel time.
  6014   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  6015   g1_policy()->phase_times()->record_par_time(par_time_ms);
  6017   double code_root_fixup_time_ms =
  6018         (os::elapsedTime() - end_par_time_sec) * 1000.0;
  6019   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
  6021   set_par_threads(0);
  6023   // Process any discovered reference objects - we have
  6024   // to do this _before_ we retire the GC alloc regions
  6025   // as we may have to copy some 'reachable' referent
  6026   // objects (and their reachable sub-graphs) that were
  6027   // not copied during the pause.
  6028   process_discovered_references(n_workers);
  6030   // Weak root processing.
  6032     G1STWIsAliveClosure is_alive(this);
  6033     G1KeepAliveClosure keep_alive(this);
  6034     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  6035     if (G1StringDedup::is_enabled()) {
  6036       G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
  6040   release_gc_alloc_regions(n_workers, evacuation_info);
  6041   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  6043   // Reset and re-enable the hot card cache.
  6044   // Note the counts for the cards in the regions in the
  6045   // collection set are reset when the collection set is freed.
  6046   hot_card_cache->reset_hot_cache();
  6047   hot_card_cache->set_use_cache(true);
  6049   // Migrate the strong code roots attached to each region in
  6050   // the collection set. Ideally we would like to do this
  6051   // after we have finished the scanning/evacuation of the
  6052   // strong code roots for a particular heap region.
  6053   migrate_strong_code_roots();
  6055   purge_code_root_memory();
  6057   if (g1_policy()->during_initial_mark_pause()) {
  6058     // Reset the claim values set during marking the strong code roots
  6059     reset_heap_region_claim_values();
  6062   finalize_for_evac_failure();
  6064   if (evacuation_failed()) {
  6065     remove_self_forwarding_pointers();
  6067     // Reset the G1EvacuationFailureALot counters and flags
  6068     // Note: the values are reset only when an actual
  6069     // evacuation failure occurs.
  6070     NOT_PRODUCT(reset_evacuation_should_fail();)
  6073   // Enqueue any references remaining on the STW
  6074   // reference processor's discovered lists. We need to do
  6075   // this after the card table is cleaned (and verified) as
  6076   // the act of enqueueing entries on to the pending list
  6077   // will log these updates (and dirty their associated
  6078   // cards). We need these updates logged to update any
  6079   // RSets.
  6080   enqueue_discovered_references(n_workers);
  6082   if (G1DeferredRSUpdate) {
  6083     redirty_logged_cards();
  6085   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  6088 void G1CollectedHeap::free_region(HeapRegion* hr,
  6089                                   FreeRegionList* free_list,
  6090                                   bool par,
  6091                                   bool locked) {
  6092   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  6093   assert(!hr->is_empty(), "the region should not be empty");
  6094   assert(free_list != NULL, "pre-condition");
  6096   // Clear the card counts for this region.
  6097   // Note: we only need to do this if the region is not young
  6098   // (since we don't refine cards in young regions).
  6099   if (!hr->is_young()) {
  6100     _cg1r->hot_card_cache()->reset_card_counts(hr);
  6102   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
  6103   free_list->add_ordered(hr);
  6106 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  6107                                      FreeRegionList* free_list,
  6108                                      bool par) {
  6109   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  6110   assert(free_list != NULL, "pre-condition");
  6112   size_t hr_capacity = hr->capacity();
  6113   // We need to read this before we make the region non-humongous,
  6114   // otherwise the information will be gone.
  6115   uint last_index = hr->last_hc_index();
  6116   hr->set_notHumongous();
  6117   free_region(hr, free_list, par);
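         // Now free the trailing 'continues humongous' regions that make up the
         // rest of this humongous object.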
  6119   uint i = hr->hrs_index() + 1;
  6120   while (i < last_index) {
  6121     HeapRegion* curr_hr = region_at(i);
  6122     assert(curr_hr->continuesHumongous(), "invariant");
  6123     curr_hr->set_notHumongous();
  6124     free_region(curr_hr, free_list, par);
  6125     i += 1;
  6129 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
  6130                                        const HeapRegionSetCount& humongous_regions_removed) {
  6131   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
  6132     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  6133     _old_set.bulk_remove(old_regions_removed);
  6134     _humongous_set.bulk_remove(humongous_regions_removed);
  6139 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
  6140   assert(list != NULL, "list can't be null");
  6141   if (!list->is_empty()) {
  6142     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  6143     _free_list.add_ordered(list);
  6147 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  6148   assert(_summary_bytes_used >= bytes,
  6149          err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
  6150                   _summary_bytes_used, bytes));
  6151   _summary_bytes_used -= bytes;
  6154 class G1ParCleanupCTTask : public AbstractGangTask {
  6155   G1SATBCardTableModRefBS* _ct_bs;
  6156   G1CollectedHeap* _g1h;
  6157   HeapRegion* volatile _su_head;
  6158 public:
  6159   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
  6160                      G1CollectedHeap* g1h) :
  6161     AbstractGangTask("G1 Par Cleanup CT Task"),
  6162     _ct_bs(ct_bs), _g1h(g1h) { }
  6164   void work(uint worker_id) {
  6165     HeapRegion* r;
  6166     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  6167       clear_cards(r);
  6171   void clear_cards(HeapRegion* r) {
  6172     // Cards of the survivors should have already been dirtied.
  6173     if (!r->is_survivor()) {
  6174       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  6177 };
  6179 #ifndef PRODUCT
  6180 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  6181   G1CollectedHeap* _g1h;
  6182   G1SATBCardTableModRefBS* _ct_bs;
  6183 public:
  6184   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
  6185     : _g1h(g1h), _ct_bs(ct_bs) { }
  6186   virtual bool doHeapRegion(HeapRegion* r) {
  6187     if (r->is_survivor()) {
  6188       _g1h->verify_dirty_region(r);
  6189     } else {
  6190       _g1h->verify_not_dirty_region(r);
  6192     return false;
  6194 };
  6196 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
  6197   // All of the region should be clean.
  6198   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6199   MemRegion mr(hr->bottom(), hr->end());
  6200   ct_bs->verify_not_dirty_region(mr);
  6203 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
  6204   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  6205   // dirty allocated blocks as they allocate them. The thread that
  6206   // retires each region and replaces it with a new one will do a
  6207   // maximal allocation to fill in [pre_dummy_top(),end()] but will
  6208   // not dirty that area (one less thing to have to do while holding
  6209   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  6210   // is dirty.
  6211   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6212   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
  6213   if (hr->is_young()) {
  6214     ct_bs->verify_g1_young_region(mr);
  6215   } else {
  6216     ct_bs->verify_dirty_region(mr);
  6220 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
  6221   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6222   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
  6223     verify_dirty_region(hr);
  6227 void G1CollectedHeap::verify_dirty_young_regions() {
  6228   verify_dirty_young_list(_young_list->first_region());
  6230 #endif
  6232 void G1CollectedHeap::cleanUpCardTable() {
  6233   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
  6234   double start = os::elapsedTime();
  6237     // Iterate over the dirty cards region list.
  6238     G1ParCleanupCTTask cleanup_task(ct_bs, this);
  6240     if (G1CollectedHeap::use_parallel_gc_threads()) {
  6241       set_par_threads();
  6242       workers()->run_task(&cleanup_task);
  6243       set_par_threads(0);
  6244     } else {
  6245       while (_dirty_cards_region_list) {
  6246         HeapRegion* r = _dirty_cards_region_list;
  6247         cleanup_task.clear_cards(r);
  6248         _dirty_cards_region_list = r->get_next_dirty_cards_region();
  6249         if (_dirty_cards_region_list == r) {
  6250           // The last region.
  6251           _dirty_cards_region_list = NULL;
  6253         r->set_next_dirty_cards_region(NULL);
  6256 #ifndef PRODUCT
  6257     if (G1VerifyCTCleanup || VerifyAfterGC) {
  6258       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
  6259       heap_region_iterate(&cleanup_verifier);
  6261 #endif
  6264   double elapsed = os::elapsedTime() - start;
  6265   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
  6268 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
  6269   size_t pre_used = 0;
  6270   FreeRegionList local_free_list("Local List for CSet Freeing");
  6272   double young_time_ms     = 0.0;
  6273   double non_young_time_ms = 0.0;
  6275   // Since the collection set is a superset of the young list,
  6276   // all we need to do to clear the young list is clear its
  6277   // head and length, and unlink any young regions in the code below
  6278   _young_list->clear();
  6280   G1CollectorPolicy* policy = g1_policy();
  6282   double start_sec = os::elapsedTime();
  6283   bool non_young = true;
  6285   HeapRegion* cur = cs_head;
  6286   int age_bound = -1;
  6287   size_t rs_lengths = 0;
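         // Walk the collection set, freeing each region and attributing the elapsed
         // time to either the young or the non-young bucket, depending on the kind
         // of region currently being processed.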
  6289   while (cur != NULL) {
  6290     assert(!is_on_master_free_list(cur), "sanity");
  6291     if (non_young) {
  6292       if (cur->is_young()) {
  6293         double end_sec = os::elapsedTime();
  6294         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6295         non_young_time_ms += elapsed_ms;
  6297         start_sec = os::elapsedTime();
  6298         non_young = false;
  6300     } else {
  6301       if (!cur->is_young()) {
  6302         double end_sec = os::elapsedTime();
  6303         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6304         young_time_ms += elapsed_ms;
  6306         start_sec = os::elapsedTime();
  6307         non_young = true;
  6311     rs_lengths += cur->rem_set()->occupied_locked();
  6313     HeapRegion* next = cur->next_in_collection_set();
  6314     assert(cur->in_collection_set(), "bad CS");
  6315     cur->set_next_in_collection_set(NULL);
  6316     cur->set_in_collection_set(false);
  6318     if (cur->is_young()) {
  6319       int index = cur->young_index_in_cset();
  6320       assert(index != -1, "invariant");
  6321       assert((uint) index < policy->young_cset_region_length(), "invariant");
  6322       size_t words_survived = _surviving_young_words[index];
  6323       cur->record_surv_words_in_group(words_survived);
  6325       // At this point we have 'popped' cur from the collection set
  6326       // (linked via next_in_collection_set()) but it is still in the
  6327       // young list (linked via next_young_region()). Clear the
  6328       // _next_young_region field.
  6329       cur->set_next_young_region(NULL);
  6330     } else {
  6331       int index = cur->young_index_in_cset();
  6332       assert(index == -1, "invariant");
  6335     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  6336             (!cur->is_young() && cur->young_index_in_cset() == -1),
  6337             "invariant" );
  6339     if (!cur->evacuation_failed()) {
  6340       MemRegion used_mr = cur->used_region();
  6342       // The used part of the region should not be empty.
  6343       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
  6344       pre_used += cur->used();
  6345       free_region(cur, &local_free_list, false /* par */, true /* locked */);
  6346     } else {
  6347       cur->uninstall_surv_rate_group();
  6348       if (cur->is_young()) {
  6349         cur->set_young_index_in_cset(-1);
  6351       cur->set_not_young();
  6352       cur->set_evacuation_failed(false);
  6353       // The region is now considered to be old.
  6354       _old_set.add(cur);
  6355       evacuation_info.increment_collectionset_used_after(cur->used());
  6357     cur = next;
  6360   evacuation_info.set_regions_freed(local_free_list.length());
  6361   policy->record_max_rs_lengths(rs_lengths);
  6362   policy->cset_regions_freed();
  6364   double end_sec = os::elapsedTime();
  6365   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  6367   if (non_young) {
  6368     non_young_time_ms += elapsed_ms;
  6369   } else {
  6370     young_time_ms += elapsed_ms;
  6373   prepend_to_freelist(&local_free_list);
  6374   decrement_summary_bytes(pre_used);
  6375   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
  6376   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
  6379 // This routine is similar to the above but does not record
  6380 // any policy statistics or update free lists; we are abandoning
  6381 // the current incremental collection set in preparation of a
  6382   // the current incremental collection set in preparation for a
  6383 // the incremental collection set again.
  6384 // This is only called when we're doing a full collection
  6385 // and is immediately followed by the tearing down of the young list.
  6387 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  6388   HeapRegion* cur = cs_head;
  6390   while (cur != NULL) {
  6391     HeapRegion* next = cur->next_in_collection_set();
  6392     assert(cur->in_collection_set(), "bad CS");
  6393     cur->set_next_in_collection_set(NULL);
  6394     cur->set_in_collection_set(false);
  6395     cur->set_young_index_in_cset(-1);
  6396     cur = next;
  6400 void G1CollectedHeap::set_free_regions_coming() {
  6401   if (G1ConcRegionFreeingVerbose) {
  6402     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6403                            "setting free regions coming");
  6406   assert(!free_regions_coming(), "pre-condition");
  6407   _free_regions_coming = true;
  6410 void G1CollectedHeap::reset_free_regions_coming() {
  6411   assert(free_regions_coming(), "pre-condition");
  6414     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6415     _free_regions_coming = false;
  6416     SecondaryFreeList_lock->notify_all();
  6419   if (G1ConcRegionFreeingVerbose) {
  6420     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  6421                            "reset free regions coming");
  6425 void G1CollectedHeap::wait_while_free_regions_coming() {
  6426   // Most of the time we won't have to wait, so let's do a quick test
  6427   // first before we take the lock.
  6428   if (!free_regions_coming()) {
  6429     return;
  6432   if (G1ConcRegionFreeingVerbose) {
  6433     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6434                            "waiting for free regions");
  6438     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6439     while (free_regions_coming()) {
  6440       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  6444   if (G1ConcRegionFreeingVerbose) {
  6445     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  6446                            "done waiting for free regions");
  6450 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  6451   assert(heap_lock_held_for_gc(),
  6452               "the heap lock should already be held by or for this thread");
  6453   _young_list->push_region(hr);
  6456 class NoYoungRegionsClosure: public HeapRegionClosure {
  6457 private:
  6458   bool _success;
  6459 public:
  6460   NoYoungRegionsClosure() : _success(true) { }
  6461   bool doHeapRegion(HeapRegion* r) {
  6462     if (r->is_young()) {
  6463       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  6464                              r->bottom(), r->end());
  6465       _success = false;
  6467     return false;
  6469   bool success() { return _success; }
  6470 };
  6472 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  6473   bool ret = _young_list->check_list_empty(check_sample);
  6475   if (check_heap) {
  6476     NoYoungRegionsClosure closure;
  6477     heap_region_iterate(&closure);
  6478     ret = ret && closure.success();
  6481   return ret;
  6484 class TearDownRegionSetsClosure : public HeapRegionClosure {
  6485 private:
  6486   HeapRegionSet *_old_set;
  6488 public:
  6489   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
  6491   bool doHeapRegion(HeapRegion* r) {
  6492     if (r->is_empty()) {
  6493       // We ignore empty regions, we'll empty the free list afterwards
  6494     } else if (r->is_young()) {
  6495       // We ignore young regions, we'll empty the young list afterwards
  6496     } else if (r->isHumongous()) {
  6497       // We ignore humongous regions, we're not tearing down the
  6498       // humongous region set
  6499     } else {
  6500       // The rest should be old
  6501       _old_set->remove(r);
  6503     return false;
  6506   ~TearDownRegionSetsClosure() {
  6507     assert(_old_set->is_empty(), "post-condition");
  6509 };
  6511 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  6512   assert_at_safepoint(true /* should_be_vm_thread */);
  6514   if (!free_list_only) {
  6515     TearDownRegionSetsClosure cl(&_old_set);
  6516     heap_region_iterate(&cl);
  6518     // Note that emptying the _young_list is postponed and instead done as
  6519     // the first step when rebuilding the regions sets again. The reason for
  6520     // this is that during a full GC string deduplication needs to know if
  6521     // a collected region was young or old when the full GC was initiated.
  6523   _free_list.remove_all();
  6526 class RebuildRegionSetsClosure : public HeapRegionClosure {
  6527 private:
  6528   bool            _free_list_only;
  6529   HeapRegionSet*   _old_set;
  6530   FreeRegionList* _free_list;
  6531   size_t          _total_used;
  6533 public:
  6534   RebuildRegionSetsClosure(bool free_list_only,
  6535                            HeapRegionSet* old_set, FreeRegionList* free_list) :
  6536     _free_list_only(free_list_only),
  6537     _old_set(old_set), _free_list(free_list), _total_used(0) {
  6538     assert(_free_list->is_empty(), "pre-condition");
  6539     if (!free_list_only) {
  6540       assert(_old_set->is_empty(), "pre-condition");
  6544   bool doHeapRegion(HeapRegion* r) {
  6545     if (r->continuesHumongous()) {
  6546       return false;
  6549     if (r->is_empty()) {
  6550       // Add free regions to the free list
  6551       _free_list->add_as_tail(r);
  6552     } else if (!_free_list_only) {
  6553       assert(!r->is_young(), "we should not come across young regions");
  6555       if (r->isHumongous()) {
  6556         // We ignore humongous regions, we left the humongous set unchanged
  6557       } else {
  6558         // The rest should be old, add them to the old set
  6559         _old_set->add(r);
  6561       _total_used += r->used();
  6564     return false;
  6567   size_t total_used() {
  6568     return _total_used;
  6570 };
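        // Rebuild the free list from the empty regions in the heap. Unless
        // free_list_only is true, the young list is emptied first and the old set
        // and the used-bytes accounting are rebuilt as well.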
  6572 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  6573   assert_at_safepoint(true /* should_be_vm_thread */);
  6575   if (!free_list_only) {
  6576     _young_list->empty_list();
  6579   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
  6580   heap_region_iterate(&cl);
  6582   if (!free_list_only) {
  6583     _summary_bytes_used = cl.total_used();
  6585   assert(_summary_bytes_used == recalculate_used(),
  6586          err_msg("inconsistent _summary_bytes_used, "
  6587                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
  6588                  _summary_bytes_used, recalculate_used()));
  6591 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  6592   _refine_cte_cl->set_concurrent(concurrent);
  6595 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  6596   HeapRegion* hr = heap_region_containing(p);
  6597   if (hr == NULL) {
  6598     return false;
  6599   } else {
  6600     return hr->is_in(p);
  6604 // Methods for the mutator alloc region
  6606 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
  6607                                                       bool force) {
  6608   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6609   assert(!force || g1_policy()->can_expand_young_list(),
  6610          "if force is true we should be able to expand the young list");
  6611   bool young_list_full = g1_policy()->is_young_list_full();
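         // Hand out a new eden region only while the young list still has room;
         // 'force' overrides this, which the assert above only permits when the
         // young list can still be expanded.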
  6612   if (force || !young_list_full) {
  6613     HeapRegion* new_alloc_region = new_region(word_size,
  6614                                               false /* is_old */,
  6615                                               false /* do_expand */);
  6616     if (new_alloc_region != NULL) {
  6617       set_region_short_lived_locked(new_alloc_region);
  6618       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
  6619       return new_alloc_region;
  6622   return NULL;
  6625 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  6626                                                   size_t allocated_bytes) {
  6627   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6628   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  6630   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  6631   _summary_bytes_used += allocated_bytes;
  6632   _hr_printer.retire(alloc_region);
  6633   // We update the eden sizes here, when the region is retired,
  6634   // instead of when it's allocated, since this is the point that its
  6635   // used space has been recorded in _summary_bytes_used.
  6636   g1mm()->update_eden_size();
  6639 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  6640                                                     bool force) {
  6641   return _g1h->new_mutator_alloc_region(word_size, force);
  6644 void G1CollectedHeap::set_par_threads() {
  6645   // Don't change the number of workers.  Use the value previously set
  6646   // in the workgroup.
  6647   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
  6648   uint n_workers = workers()->active_workers();
  6649   assert(UseDynamicNumberOfGCThreads ||
  6650            n_workers == workers()->total_workers(),
  6651       "Otherwise should be using the total number of workers");
  6652   if (n_workers == 0) {
  6653     assert(false, "Should have been set in prior evacuation pause.");
  6654     n_workers = ParallelGCThreads;
  6655     workers()->set_active_workers(n_workers);
  6657   set_par_threads(n_workers);
  6660 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  6661                                        size_t allocated_bytes) {
  6662   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  6665 // Methods for the GC alloc regions
  6667 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
  6668                                                  uint count,
  6669                                                  GCAllocPurpose ap) {
  6670   assert(FreeList_lock->owned_by_self(), "pre-condition");
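         // Respect the per-purpose limit on GC alloc regions: allocations for
         // survivors are tagged as survivor regions, all other GC allocations
         // go to old regions.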
  6672   if (count < g1_policy()->max_regions(ap)) {
  6673     bool survivor = (ap == GCAllocForSurvived);
  6674     HeapRegion* new_alloc_region = new_region(word_size,
  6675                                               !survivor,
  6676                                               true /* do_expand */);
  6677     if (new_alloc_region != NULL) {
  6678       // We really only need to do this for old regions given that we
  6679       // should never scan survivors. But it doesn't hurt to do it
  6680       // for survivors too.
  6681       new_alloc_region->record_top_and_timestamp();
  6682       if (survivor) {
  6683         new_alloc_region->set_survivor();
  6684         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
  6685       } else {
  6686         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
  6688       bool during_im = g1_policy()->during_initial_mark_pause();
  6689       new_alloc_region->note_start_of_copying(during_im);
  6690       return new_alloc_region;
  6691     } else {
  6692       g1_policy()->note_alloc_region_limit_reached(ap);
  6695   return NULL;
  6698 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  6699                                              size_t allocated_bytes,
  6700                                              GCAllocPurpose ap) {
  6701   bool during_im = g1_policy()->during_initial_mark_pause();
  6702   alloc_region->note_end_of_copying(during_im);
  6703   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  6704   if (ap == GCAllocForSurvived) {
  6705     young_list()->add_survivor_region(alloc_region);
  6706   } else {
  6707     _old_set.add(alloc_region);
  6709   _hr_printer.retire(alloc_region);
  6712 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
  6713                                                        bool force) {
  6714   assert(!force, "not supported for GC alloc regions");
  6715   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
  6718 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6719                                           size_t allocated_bytes) {
  6720   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6721                                GCAllocForSurvived);
  6724 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
  6725                                                   bool force) {
  6726   assert(!force, "not supported for GC alloc regions");
  6727   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
  6730 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
  6731                                      size_t allocated_bytes) {
  6732   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
  6733                                GCAllocForTenured);
  6735 // Heap region set verification
  6737 class VerifyRegionListsClosure : public HeapRegionClosure {
  6738 private:
  6739   HeapRegionSet*   _old_set;
  6740   HeapRegionSet*   _humongous_set;
  6741   FreeRegionList*  _free_list;
  6743 public:
  6744   HeapRegionSetCount _old_count;
  6745   HeapRegionSetCount _humongous_count;
  6746   HeapRegionSetCount _free_count;
  6748   VerifyRegionListsClosure(HeapRegionSet* old_set,
  6749                            HeapRegionSet* humongous_set,
  6750                            FreeRegionList* free_list) :
  6751     _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
  6752     _old_count(), _humongous_count(), _free_count(){ }
  6754   bool doHeapRegion(HeapRegion* hr) {
  6755     if (hr->continuesHumongous()) {
  6756       return false;
  6759     if (hr->is_young()) {
  6760       // TODO
  6761     } else if (hr->startsHumongous()) {
  6762       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
  6763       _humongous_count.increment(1u, hr->capacity());
  6764     } else if (hr->is_empty()) {
  6765       assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
  6766       _free_count.increment(1u, hr->capacity());
  6767     } else {
  6768       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
  6769       _old_count.increment(1u, hr->capacity());
  6771     return false;
  6774   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
  6775     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
  6776     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  6777         old_set->total_capacity_bytes(), _old_count.capacity()));
  6779     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
  6780     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  6781         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
  6783     guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
  6784     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  6785         free_list->total_capacity_bytes(), _free_count.capacity()));
  6787 };
  6789 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
  6790                                              HeapWord* bottom) {
  6791   HeapWord* end = bottom + HeapRegion::GrainWords;
  6792   MemRegion mr(bottom, end);
  6793   assert(_g1_reserved.contains(mr), "invariant");
  6794   // This might return NULL if the allocation fails
  6795   return new HeapRegion(hrs_index, _bot_shared, mr);
  6798 void G1CollectedHeap::verify_region_sets() {
  6799   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  6801   // First, check the explicit lists.
  6802   _free_list.verify_list();
  6804     // Given that a concurrent operation might be adding regions to
  6805     // the secondary free list we have to take the lock before
  6806     // verifying it.
  6807     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  6808     _secondary_free_list.verify_list();
  6811   // If a concurrent region freeing operation is in progress it will
  6812   // be difficult to correctly attribute any free regions we come
  6813   // across to the correct free list given that they might belong to
  6814   // one of several (free_list, secondary_free_list, any local lists,
  6815   // etc.). So, if that's the case we will skip the rest of the
  6816   // verification operation. Alternatively, waiting for the concurrent
  6817   // operation to complete will have a non-trivial effect on the GC's
  6818   // operation (no concurrent operation will last longer than the
  6819   // interval between two calls to verification) and it might hide
  6820   // any issues that we would like to catch during testing.
  6821   if (free_regions_coming()) {
  6822     return;
  6825   // Make sure we append the secondary_free_list on the free_list so
  6826   // that all free regions we will come across can be safely
  6827   // attributed to the free_list.
  6828   append_secondary_free_list_if_not_empty_with_lock();
  6830   // Finally, make sure that the region accounting in the lists is
  6831   // consistent with what we see in the heap.
  6833   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
  6834   heap_region_iterate(&cl);
  6835   cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
  6838 // Optimized nmethod scanning
  6840 class RegisterNMethodOopClosure: public OopClosure {
  6841   G1CollectedHeap* _g1h;
  6842   nmethod* _nm;
  6844   template <class T> void do_oop_work(T* p) {
  6845     T heap_oop = oopDesc::load_heap_oop(p);
  6846     if (!oopDesc::is_null(heap_oop)) {
  6847       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  6848       HeapRegion* hr = _g1h->heap_region_containing(obj);
  6849       assert(!hr->continuesHumongous(),
  6850              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
  6851                      " starting at "HR_FORMAT,
  6852                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
  6854       // HeapRegion::add_strong_code_root() avoids adding duplicate
  6855   // entries but having duplicates is OK since we "mark" nmethods
  6856       // as visited when we scan the strong code root lists during the GC.
  6857       hr->add_strong_code_root(_nm);
  6858       assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
  6859              err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
  6860                      _nm, HR_FORMAT_PARAMS(hr)));
  6864 public:
  6865   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
  6866     _g1h(g1h), _nm(nm) {}
  6868   void do_oop(oop* p)       { do_oop_work(p); }
  6869   void do_oop(narrowOop* p) { do_oop_work(p); }
  6870 };
  6872 class UnregisterNMethodOopClosure: public OopClosure {
  6873   G1CollectedHeap* _g1h;
  6874   nmethod* _nm;
  6876   template <class T> void do_oop_work(T* p) {
  6877     T heap_oop = oopDesc::load_heap_oop(p);
  6878     if (!oopDesc::is_null(heap_oop)) {
  6879       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  6880       HeapRegion* hr = _g1h->heap_region_containing(obj);
  6881       assert(!hr->continuesHumongous(),
  6882              err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
  6883                      " starting at "HR_FORMAT,
  6884                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
  6886       hr->remove_strong_code_root(_nm);
  6887       assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
  6888              err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
  6889                      _nm, HR_FORMAT_PARAMS(hr)));
  6893 public:
  6894   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
  6895     _g1h(g1h), _nm(nm) {}
  6897   void do_oop(oop* p)       { do_oop_work(p); }
  6898   void do_oop(narrowOop* p) { do_oop_work(p); }
  6899 };
  6901 void G1CollectedHeap::register_nmethod(nmethod* nm) {
  6902   CollectedHeap::register_nmethod(nm);
  6904   guarantee(nm != NULL, "sanity");
  6905   RegisterNMethodOopClosure reg_cl(this, nm);
  6906   nm->oops_do(&reg_cl);
  6909 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  6910   CollectedHeap::unregister_nmethod(nm);
  6912   guarantee(nm != NULL, "sanity");
  6913   UnregisterNMethodOopClosure reg_cl(this, nm);
  6914   nm->oops_do(&reg_cl, true);
  6917 class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
  6918 public:
  6919   bool doHeapRegion(HeapRegion *hr) {
  6920     assert(!hr->isHumongous(),
  6921            err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
  6922                    HR_FORMAT_PARAMS(hr)));
  6923     hr->migrate_strong_code_roots();
  6924     return false;
  6926 };
  6928 void G1CollectedHeap::migrate_strong_code_roots() {
  6929   MigrateCodeRootsHeapRegionClosure cl;
  6930   double migrate_start = os::elapsedTime();
  6931   collection_set_iterate(&cl);
  6932   double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
  6933   g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
  6936 void G1CollectedHeap::purge_code_root_memory() {
  6937   double purge_start = os::elapsedTime();
  6938   G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
  6939   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
  6940   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
  6943 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
  6944   G1CollectedHeap* _g1h;
  6946 public:
  6947   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
  6948     _g1h(g1h) {}
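         // Register each nmethod in the code cache with the regions its oops point
         // into, rebuilding the per-region strong code root lists (only when
         // scavengeable roots in code are tracked, i.e. ScavengeRootsInCode).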
  6950   void do_code_blob(CodeBlob* cb) {
  6951     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
  6952     if (nm == NULL) {
  6953       return;
  6956     if (ScavengeRootsInCode) {
  6957       _g1h->register_nmethod(nm);
  6960 };
  6962 void G1CollectedHeap::rebuild_strong_code_roots() {
  6963   RebuildStrongCodeRootClosure blob_cl(this);
  6964   CodeCache::blobs_do(&blob_cl);
