src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2715:abdfc822206f
child        2817:49a67202bc67
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and by intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
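
As an illustration of the Summary above (a conceptual sketch only -- read_referent_with_satb_barrier(), is_marking_active() and satb_enqueue() are hypothetical placeholder names, not the helpers this changeset adds): every path that reads the referent field while concurrent marking is active must also log the value it read in the thread's SATB buffer, so that SATB marking treats the referent as live.

    // Conceptual sketch, not the changeset's code.
    oop read_referent_with_satb_barrier(oop reference) {
      oop referent = java_lang_ref_Reference::referent(reference);  // the raw field read
      if (referent != NULL && is_marking_active()) {                // hypothetical marking check
        satb_enqueue(referent);  // log the referent so SATB marking keeps it live
      }
      return referent;
    }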

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/icBuffer.hpp"
    27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
    28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
    30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    34 #include "gc_implementation/g1/g1MarkSweep.hpp"
    35 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    36 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    37 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    39 #include "gc_implementation/g1/vm_operations_g1.hpp"
    40 #include "gc_implementation/shared/isGCActiveMark.hpp"
    41 #include "memory/gcLocker.inline.hpp"
    42 #include "memory/genOopClosures.inline.hpp"
    43 #include "memory/generationSpec.hpp"
    44 #include "oops/oop.inline.hpp"
    45 #include "oops/oop.pcgc.inline.hpp"
    46 #include "runtime/aprofiler.hpp"
    47 #include "runtime/vmThread.hpp"
    49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
    51 // turn it on so that the contents of the young list (scan-only /
    52 // to-be-collected) are printed at "strategic" points before / during
    53 // / after the collection --- this is useful for debugging
    54 #define YOUNG_LIST_VERBOSE 0
    55 // CURRENT STATUS
    56 // This file is under construction.  Search for "FIXME".
    58 // INVARIANTS/NOTES
    59 //
    60 // All allocation activity covered by the G1CollectedHeap interface is
    61 // serialized by acquiring the HeapLock.  This happens in mem_allocate
    62 // and allocate_new_tlab, which are the "entry" points to the
    63 // allocation code from the rest of the JVM.  (Note that this does not
    64 // apply to TLAB allocation, which is not part of this interface: it
    65 // is done by clients of this interface.)
    67 // Local to this file.
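       // Applied by the concurrent refinement and mutator threads to each dirty
       // card: refines the card and yields if the suspendible thread set asks.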
    69 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    70   SuspendibleThreadSet* _sts;
    71   G1RemSet* _g1rs;
    72   ConcurrentG1Refine* _cg1r;
    73   bool _concurrent;
    74 public:
    75   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    76                               G1RemSet* g1rs,
    77                               ConcurrentG1Refine* cg1r) :
    78     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    79   {}
    80   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    81     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    82     // This path is executed by the concurrent refine or mutator threads,
    83     // concurrently, and so we do not care if card_ptr contains references
    84     // that point into the collection set.
    85     assert(!oops_into_cset, "should be");
    87     if (_concurrent && _sts->should_yield()) {
    88       // Caller will actually yield.
    89       return false;
    90     }
    91     // Otherwise, we finished successfully; return true.
    92     return true;
    93   }
    94   void set_concurrent(bool b) { _concurrent = b; }
    95 };
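       // Debugging closure used by check_ct_logs_at_safepoint(): resets each logged
       // card back to the clean value and histograms the card values it sees.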
    98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
    99   int _calls;
   100   G1CollectedHeap* _g1h;
   101   CardTableModRefBS* _ctbs;
   102   int _histo[256];
   103 public:
   104   ClearLoggedCardTableEntryClosure() :
   105     _calls(0)
   106   {
   107     _g1h = G1CollectedHeap::heap();
   108     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   109     for (int i = 0; i < 256; i++) _histo[i] = 0;
   110   }
   111   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   112     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   113       _calls++;
   114       unsigned char* ujb = (unsigned char*)card_ptr;
   115       int ind = (int)(*ujb);
   116       _histo[ind]++;
   117       *card_ptr = -1;
   118     }
   119     return true;
   120   }
   121   int calls() { return _calls; }
   122   void print_histo() {
   123     gclog_or_tty->print_cr("Card table value histogram:");
   124     for (int i = 0; i < 256; i++) {
   125       if (_histo[i] != 0) {
   126         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
   127       }
   128     }
   129   }
   130 };
   132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   133   int _calls;
   134   G1CollectedHeap* _g1h;
   135   CardTableModRefBS* _ctbs;
   136 public:
   137   RedirtyLoggedCardTableEntryClosure() :
   138     _calls(0)
   139   {
   140     _g1h = G1CollectedHeap::heap();
   141     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   142   }
   143   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   144     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   145       _calls++;
   146       *card_ptr = 0;
   147     }
   148     return true;
   149   }
   150   int calls() { return _calls; }
   151 };
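       // Fast variant: unconditionally redirties each logged card, with no range
       // check or call counting.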
   153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   154 public:
   155   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   156     *card_ptr = CardTableModRefBS::dirty_card_val();
   157     return true;
   158   }
   159 };
   161 YoungList::YoungList(G1CollectedHeap* g1h)
   162   : _g1h(g1h), _head(NULL),
   163     _length(0),
   164     _last_sampled_rs_lengths(0),
   165     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
   166 {
   167   guarantee( check_list_empty(false), "just making sure..." );
   168 }
   170 void YoungList::push_region(HeapRegion *hr) {
   171   assert(!hr->is_young(), "should not already be young");
   172   assert(hr->get_next_young_region() == NULL, "cause it should!");
   174   hr->set_next_young_region(_head);
   175   _head = hr;
   177   hr->set_young();
   178   double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
   179   ++_length;
   180 }
   182 void YoungList::add_survivor_region(HeapRegion* hr) {
   183   assert(hr->is_survivor(), "should be flagged as survivor region");
   184   assert(hr->get_next_young_region() == NULL, "cause it should!");
   186   hr->set_next_young_region(_survivor_head);
   187   if (_survivor_head == NULL) {
   188     _survivor_tail = hr;
   189   }
   190   _survivor_head = hr;
   192   ++_survivor_length;
   193 }
   195 void YoungList::empty_list(HeapRegion* list) {
   196   while (list != NULL) {
   197     HeapRegion* next = list->get_next_young_region();
   198     list->set_next_young_region(NULL);
   199     list->uninstall_surv_rate_group();
   200     list->set_not_young();
   201     list = next;
   202   }
   203 }
   205 void YoungList::empty_list() {
   206   assert(check_list_well_formed(), "young list should be well formed");
   208   empty_list(_head);
   209   _head = NULL;
   210   _length = 0;
   212   empty_list(_survivor_head);
   213   _survivor_head = NULL;
   214   _survivor_tail = NULL;
   215   _survivor_length = 0;
   217   _last_sampled_rs_lengths = 0;
   219   assert(check_list_empty(false), "just making sure...");
   220 }
   222 bool YoungList::check_list_well_formed() {
   223   bool ret = true;
   225   size_t length = 0;
   226   HeapRegion* curr = _head;
   227   HeapRegion* last = NULL;
   228   while (curr != NULL) {
   229     if (!curr->is_young()) {
   230       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   231                              "incorrectly tagged (y: %d, surv: %d)",
   232                              curr->bottom(), curr->end(),
   233                              curr->is_young(), curr->is_survivor());
   234       ret = false;
   235     }
   236     ++length;
   237     last = curr;
   238     curr = curr->get_next_young_region();
   239   }
   240   ret = ret && (length == _length);
   242   if (!ret) {
   243     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   244     gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
   245                            length, _length);
   246   }
   248   return ret;
   249 }
   251 bool YoungList::check_list_empty(bool check_sample) {
   252   bool ret = true;
   254   if (_length != 0) {
   255     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
   256                   _length);
   257     ret = false;
   258   }
   259   if (check_sample && _last_sampled_rs_lengths != 0) {
   260     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   261     ret = false;
   262   }
   263   if (_head != NULL) {
   264     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   265     ret = false;
   266   }
   267   if (!ret) {
   268     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   269   }
   271   return ret;
   272 }
   274 void
   275 YoungList::rs_length_sampling_init() {
   276   _sampled_rs_lengths = 0;
   277   _curr               = _head;
   278 }
   280 bool
   281 YoungList::rs_length_sampling_more() {
   282   return _curr != NULL;
   283 }
   285 void
   286 YoungList::rs_length_sampling_next() {
   287   assert( _curr != NULL, "invariant" );
   288   size_t rs_length = _curr->rem_set()->occupied();
   290   _sampled_rs_lengths += rs_length;
   292   // The current region may not yet have been added to the
   293   // incremental collection set (it gets added when it is
   294   // retired as the current allocation region).
   295   if (_curr->in_collection_set()) {
   296     // Update the collection set policy information for this region
   297     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   298   }
   300   _curr = _curr->get_next_young_region();
   301   if (_curr == NULL) {
   302     _last_sampled_rs_lengths = _sampled_rs_lengths;
   303     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   304   }
   305 }
   307 void
   308 YoungList::reset_auxilary_lists() {
   309   guarantee( is_empty(), "young list should be empty" );
   310   assert(check_list_well_formed(), "young list should be well formed");
   312   // Add survivor regions to SurvRateGroup.
   313   _g1h->g1_policy()->note_start_adding_survivor_regions();
   314   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   316   for (HeapRegion* curr = _survivor_head;
   317        curr != NULL;
   318        curr = curr->get_next_young_region()) {
   319     _g1h->g1_policy()->set_region_survivors(curr);
   321     // The region is a non-empty survivor so let's add it to
   322     // the incremental collection set for the next evacuation
   323     // pause.
   324     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   325   }
   326   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   328   _head   = _survivor_head;
   329   _length = _survivor_length;
   330   if (_survivor_head != NULL) {
   331     assert(_survivor_tail != NULL, "cause it shouldn't be");
   332     assert(_survivor_length > 0, "invariant");
   333     _survivor_tail->set_next_young_region(NULL);
   334   }
   336   // Don't clear the survivor list handles until the start of
   337   // the next evacuation pause - we need it in order to re-tag
   338   // the survivor regions from this evacuation pause as 'young'
   339   // at the start of the next.
   341   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   343   assert(check_list_well_formed(), "young list should be well formed");
   344 }
   346 void YoungList::print() {
   347   HeapRegion* lists[] = {_head,   _survivor_head};
   348   const char* names[] = {"YOUNG", "SURVIVOR"};
   350   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   351     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   352     HeapRegion *curr = lists[list];
   353     if (curr == NULL)
   354       gclog_or_tty->print_cr("  empty");
   355     while (curr != NULL) {
   356       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
   357                              "age: %4d, y: %d, surv: %d",
   358                              curr->bottom(), curr->end(),
   359                              curr->top(),
   360                              curr->prev_top_at_mark_start(),
   361                              curr->next_top_at_mark_start(),
   362                              curr->top_at_conc_mark_count(),
   363                              curr->age_in_surv_rate_group_cond(),
   364                              curr->is_young(),
   365                              curr->is_survivor());
   366       curr = curr->get_next_young_region();
   367     }
   368   }
   370   gclog_or_tty->print_cr("");
   371 }
   373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   374 {
   375   // Claim the right to put the region on the dirty cards region list
   376   // by installing a self pointer.
   377   HeapRegion* next = hr->get_next_dirty_cards_region();
   378   if (next == NULL) {
   379     HeapRegion* res = (HeapRegion*)
   380       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   381                           NULL);
   382     if (res == NULL) {
   383       HeapRegion* head;
   384       do {
   385         // Put the region on the dirty cards region list.
   386         head = _dirty_cards_region_list;
   387         next = (HeapRegion*)
   388           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   389         if (next == head) {
   390           assert(hr->get_next_dirty_cards_region() == hr,
   391                  "hr->get_next_dirty_cards_region() != hr");
   392           if (next == NULL) {
   393             // The last region in the list points to itself.
   394             hr->set_next_dirty_cards_region(hr);
   395           } else {
   396             hr->set_next_dirty_cards_region(next);
   397           }
   398         }
   399       } while (next != head);
   400     }
   401   }
   402 }
   404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   405 {
   406   HeapRegion* head;
   407   HeapRegion* hr;
   408   do {
   409     head = _dirty_cards_region_list;
   410     if (head == NULL) {
   411       return NULL;
   412     }
   413     HeapRegion* new_head = head->get_next_dirty_cards_region();
   414     if (head == new_head) {
   415       // The last region.
   416       new_head = NULL;
   417     }
   418     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   419                                           head);
   420   } while (hr != head);
   421   assert(hr != NULL, "invariant");
   422   hr->set_next_dirty_cards_region(NULL);
   423   return hr;
   424 }
   426 void G1CollectedHeap::stop_conc_gc_threads() {
   427   _cg1r->stop();
   428   _cmThread->stop();
   429 }
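       // Consistency check between the card table and the dirty card logs: clear
       // the logged cards, verify the table is then clean, redirty them, and
       // compare the counts.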
   431 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   432   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   433   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   435   // Count the dirty cards at the start.
   436   CountNonCleanMemRegionClosure count1(this);
   437   ct_bs->mod_card_iterate(&count1);
   438   int orig_count = count1.n();
   440   // First clear the logged cards.
   441   ClearLoggedCardTableEntryClosure clear;
   442   dcqs.set_closure(&clear);
   443   dcqs.apply_closure_to_all_completed_buffers();
   444   dcqs.iterate_closure_all_threads(false);
   445   clear.print_histo();
   447   // Now ensure that there are no dirty cards.
   448   CountNonCleanMemRegionClosure count2(this);
   449   ct_bs->mod_card_iterate(&count2);
   450   if (count2.n() != 0) {
   451     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   452                            count2.n(), orig_count);
   453   }
   454   guarantee(count2.n() == 0, "Card table should be clean.");
   456   RedirtyLoggedCardTableEntryClosure redirty;
   457   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   458   dcqs.apply_closure_to_all_completed_buffers();
   459   dcqs.iterate_closure_all_threads(false);
   460   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   461                          clear.calls(), orig_count);
   462   guarantee(redirty.calls() == clear.calls(),
   463             "Or else mechanism is broken.");
   465   CountNonCleanMemRegionClosure count3(this);
   466   ct_bs->mod_card_iterate(&count3);
   467   if (count3.n() != orig_count) {
   468     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   469                            orig_count, count3.n());
   470     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   471   }
   473   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   474 }
   476 // Private class members.
   478 G1CollectedHeap* G1CollectedHeap::_g1h;
   480 // Private methods.
   482 HeapRegion*
   483 G1CollectedHeap::new_region_try_secondary_free_list() {
   484   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   485   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
   486     if (!_secondary_free_list.is_empty()) {
   487       if (G1ConcRegionFreeingVerbose) {
   488         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   489                                "secondary_free_list has "SIZE_FORMAT" entries",
   490                                _secondary_free_list.length());
   491       }
   492       // It looks as if there are free regions available on the
   493       // secondary_free_list. Let's move them to the free_list and try
   494       // again to allocate from it.
   495       append_secondary_free_list();
   497       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
   498              "empty we should have moved at least one entry to the free_list");
   499       HeapRegion* res = _free_list.remove_head();
   500       if (G1ConcRegionFreeingVerbose) {
   501         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   502                                "allocated "HR_FORMAT" from secondary_free_list",
   503                                HR_FORMAT_PARAMS(res));
   504       }
   505       return res;
   506     }
   508     // Wait here until we get notified either when (a) there are no
   509     // more free regions coming or (b) some regions have been moved onto
   510     // the secondary_free_list.
   511     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   512   }
   514   if (G1ConcRegionFreeingVerbose) {
   515     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   516                            "could not allocate from secondary_free_list");
   517   }
   518   return NULL;
   519 }
   521 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   522   assert(!isHumongous(word_size) ||
   523                                   word_size <= (size_t) HeapRegion::GrainWords,
   524          "the only time we use this to allocate a humongous region is "
   525          "when we are allocating a single humongous region");
   527   HeapRegion* res;
   528   if (G1StressConcRegionFreeing) {
   529     if (!_secondary_free_list.is_empty()) {
   530       if (G1ConcRegionFreeingVerbose) {
   531         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   532                                "forced to look at the secondary_free_list");
   533       }
   534       res = new_region_try_secondary_free_list();
   535       if (res != NULL) {
   536         return res;
   537       }
   538     }
   539   }
   540   res = _free_list.remove_head_or_null();
   541   if (res == NULL) {
   542     if (G1ConcRegionFreeingVerbose) {
   543       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   544                              "res == NULL, trying the secondary_free_list");
   545     }
   546     res = new_region_try_secondary_free_list();
   547   }
   548   if (res == NULL && do_expand) {
   549     if (expand(word_size * HeapWordSize)) {
   550       // The expansion succeeded and so we should have at least one
   551       // region on the free list.
   552       res = _free_list.remove_head();
   553     }
   554   }
   555   if (res != NULL) {
   556     if (G1PrintHeapRegions) {
   557       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
   558                              "top "PTR_FORMAT, res->hrs_index(),
   559                              res->bottom(), res->end(), res->top());
   560     }
   561   }
   562   return res;
   563 }
   565 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
   566                                                  size_t word_size) {
   567   HeapRegion* alloc_region = NULL;
   568   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   569     alloc_region = new_region(word_size, true /* do_expand */);
   570     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   571       alloc_region->set_survivor();
   572     }
   573     ++_gc_alloc_region_counts[purpose];
   574   } else {
   575     g1_policy()->note_alloc_region_limit_reached(purpose);
   576   }
   577   return alloc_region;
   578 }
   580 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
   581                                                        size_t word_size) {
   582   assert(isHumongous(word_size), "word_size should be humongous");
   583   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   585   int first = -1;
   586   if (num_regions == 1) {
   587     // Only one region to allocate, no need to go through the slower
   588     // path. The caller will attempt the expansion if this fails, so
   589     // let's not try to expand here too.
   590     HeapRegion* hr = new_region(word_size, false /* do_expand */);
   591     if (hr != NULL) {
   592       first = hr->hrs_index();
   593     } else {
   594       first = -1;
   595     }
   596   } else {
   597     // We can't allocate humongous regions while cleanupComplete() is
   598     // running, since some of the regions we find to be empty might not
   599     // yet be added to the free list and it is not straightforward to
   600     // know which list they are on so that we can remove them. Note
   601     // that we only need to do this if we need to allocate more than
   602     // one region to satisfy the current humongous allocation
   603     // request. If we are only allocating one region we use the common
   604     // region allocation code (see above).
   605     wait_while_free_regions_coming();
   606     append_secondary_free_list_if_not_empty_with_lock();
   608     if (free_regions() >= num_regions) {
   609       first = _hrs->find_contiguous(num_regions);
   610       if (first != -1) {
   611         for (int i = first; i < first + (int) num_regions; ++i) {
   612           HeapRegion* hr = _hrs->at(i);
   613           assert(hr->is_empty(), "sanity");
   614           assert(is_on_master_free_list(hr), "sanity");
   615           hr->set_pending_removal(true);
   616         }
   617         _free_list.remove_all_pending(num_regions);
   618       }
   619     }
   620   }
   621   return first;
   622 }
   624 HeapWord*
   625 G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   626                                                            size_t num_regions,
   627                                                            size_t word_size) {
   628   assert(first != -1, "pre-condition");
   629   assert(isHumongous(word_size), "word_size should be humongous");
   630   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
   632   // Index of last region in the series + 1.
   633   int last = first + (int) num_regions;
   635   // We need to initialize the region(s) we just discovered. This is
   636   // a bit tricky given that it can happen concurrently with
   637   // refinement threads refining cards on these regions and
   638   // potentially wanting to refine the BOT as they are scanning
   639   // those cards (this can happen shortly after a cleanup; see CR
   640   // 6991377). So we have to set up the region(s) carefully and in
   641   // a specific order.
   643   // The word size sum of all the regions we will allocate.
   644   size_t word_size_sum = num_regions * HeapRegion::GrainWords;
   645   assert(word_size <= word_size_sum, "sanity");
   647   // This will be the "starts humongous" region.
   648   HeapRegion* first_hr = _hrs->at(first);
   649   // The header of the new object will be placed at the bottom of
   650   // the first region.
   651   HeapWord* new_obj = first_hr->bottom();
   652   // This will be the new end of the first region in the series that
   653   // should also match the end of the last region in the series.
   654   HeapWord* new_end = new_obj + word_size_sum;
   655   // This will be the new top of the first region that will reflect
   656   // this allocation.
   657   HeapWord* new_top = new_obj + word_size;
   659   // First, we need to zero the header of the space that we will be
   660   // allocating. When we update top further down, some refinement
   661   // threads might try to scan the region. By zeroing the header we
   662   // ensure that any thread that will try to scan the region will
   663   // come across the zero klass word and bail out.
   664   //
   665   // NOTE: It would not have been correct to have used
   666   // CollectedHeap::fill_with_object() and make the space look like
   667   // an int array. The thread that is doing the allocation will
   668   // later update the object header to a potentially different array
   669   // type and, for a very short period of time, the klass and length
   670   // fields will be inconsistent. This could cause a refinement
   671   // thread to calculate the object size incorrectly.
   672   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   674   // We will set up the first region as "starts humongous". This
   675   // will also update the BOT covering all the regions to reflect
   676   // that there is a single object that starts at the bottom of the
   677   // first region.
   678   first_hr->set_startsHumongous(new_top, new_end);
   680   // Then, if there are any, we will set up the "continues
   681   // humongous" regions.
   682   HeapRegion* hr = NULL;
   683   for (int i = first + 1; i < last; ++i) {
   684     hr = _hrs->at(i);
   685     hr->set_continuesHumongous(first_hr);
   686   }
   687   // If we have "continues humongous" regions (hr != NULL), then the
   688   // end of the last one should match new_end.
   689   assert(hr == NULL || hr->end() == new_end, "sanity");
   691   // Up to this point no concurrent thread would have been able to
   692   // do any scanning on any region in this series. All the top
   693   // fields still point to bottom, so the intersection between
   694   // [bottom,top] and [card_start,card_end] will be empty. Before we
   695   // update the top fields, we'll do a storestore to make sure that
   696   // no thread sees the update to top before the zeroing of the
   697   // object header and the BOT initialization.
   698   OrderAccess::storestore();
   700   // Now that the BOT and the object header have been initialized,
   701   // we can update top of the "starts humongous" region.
   702   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   703          "new_top should be in this region");
   704   first_hr->set_top(new_top);
   706   // Now, we will update the top fields of the "continues humongous"
   707   // regions. The reason we need to do this is that, otherwise,
   708   // these regions would look empty and this will confuse parts of
   709   // G1. For example, the code that looks for a consecutive number
   710   // of empty regions will consider them empty and try to
   711   // re-allocate them. We can extend is_empty() to also include
   712   // !continuesHumongous(), but it is easier to just update the top
   713   // fields here. The way we set top for all regions (i.e., top ==
   714   // end for all regions but the last one, top == new_top for the
   715   // last one) is actually used when we will free up the humongous
   716   // region in free_humongous_region().
   717   hr = NULL;
   718   for (int i = first + 1; i < last; ++i) {
   719     hr = _hrs->at(i);
   720     if ((i + 1) == last) {
   721       // last continues humongous region
   722       assert(hr->bottom() < new_top && new_top <= hr->end(),
   723              "new_top should fall on this region");
   724       hr->set_top(new_top);
   725     } else {
   726       // not last one
   727       assert(new_top > hr->end(), "new_top should be above this region");
   728       hr->set_top(hr->end());
   729     }
   730   }
   731   // If we have continues humongous regions (hr != NULL), then the
   732   // end of the last one should match new_end and its top should
   733   // match new_top.
   734   assert(hr == NULL ||
   735          (hr->end() == new_end && hr->top() == new_top), "sanity");
   737   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
   738   _summary_bytes_used += first_hr->used();
   739   _humongous_set.add(first_hr);
   741   return new_obj;
   742 }
   744 // If the allocation could fit into the free regions without expansion, try that.
   745 // Otherwise, if we can expand the heap, do so.
   746 // Otherwise, if using the expansion regions might help, try with them given back.
   747 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   748   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   750   verify_region_sets_optional();
   752   size_t num_regions =
   753          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   754   size_t x_size = expansion_regions();
   755   size_t fs = _hrs->free_suffix();
   756   int first = humongous_obj_allocate_find_first(num_regions, word_size);
   757   if (first == -1) {
   758     // The only thing we can do now is attempt expansion.
   759     if (fs + x_size >= num_regions) {
   760       // If the number of regions we're trying to allocate for this
   761       // object is at most the number of regions in the free suffix,
   762       // then the call to humongous_obj_allocate_find_first() above
   763       // should have succeeded and we wouldn't be here.
   764       //
   765       // We should only be trying to expand when the free suffix is
   766       // not sufficient for the object _and_ we have some expansion
   767       // room available.
   768       assert(num_regions > fs, "earlier allocation should have succeeded");
   770       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
   771         first = humongous_obj_allocate_find_first(num_regions, word_size);
   772         // If the expansion was successful then the allocation
   773         // should have been successful.
   774         assert(first != -1, "this should have worked");
   775       }
   776     }
   777   }
   779   HeapWord* result = NULL;
   780   if (first != -1) {
   781     result =
   782       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
   783     assert(result != NULL, "it should always return a valid result");
   784   }
   786   verify_region_sets_optional();
   788   return result;
   789 }
   791 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   792   assert_heap_not_locked_and_not_at_safepoint();
   793   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
   795   unsigned int dummy_gc_count_before;
   796   return attempt_allocation(word_size, &dummy_gc_count_before);
   797 }
   799 HeapWord*
   800 G1CollectedHeap::mem_allocate(size_t word_size,
   801                               bool   is_noref,
   802                               bool   is_tlab,
   803                               bool*  gc_overhead_limit_was_exceeded) {
   804   assert_heap_not_locked_and_not_at_safepoint();
   805   assert(!is_tlab, "mem_allocate() this should not be called directly "
   806          "to allocate TLABs");
   808   // Loop until the allocation is satisfied, or unsatisfied after GC.
   809   for (int try_count = 1; /* we'll return */; try_count += 1) {
   810     unsigned int gc_count_before;
   812     HeapWord* result = NULL;
   813     if (!isHumongous(word_size)) {
   814       result = attempt_allocation(word_size, &gc_count_before);
   815     } else {
   816       result = attempt_allocation_humongous(word_size, &gc_count_before);
   817     }
   818     if (result != NULL) {
   819       return result;
   820     }
   822     // Create the garbage collection operation...
   823     VM_G1CollectForAllocation op(gc_count_before, word_size);
   824     // ...and get the VM thread to execute it.
   825     VMThread::execute(&op);
   827     if (op.prologue_succeeded() && op.pause_succeeded()) {
   828       // If the operation was successful we'll return the result even
   829       // if it is NULL. If the allocation attempt failed immediately
   830       // after a Full GC, it's unlikely we'll be able to allocate now.
   831       HeapWord* result = op.result();
   832       if (result != NULL && !isHumongous(word_size)) {
   833         // Allocations that take place on VM operations do not do any
   834         // card dirtying and we have to do it here. We only have to do
   835         // this for non-humongous allocations, though.
   836         dirty_young_block(result, word_size);
   837       }
   838       return result;
   839     } else {
   840       assert(op.result() == NULL,
   841              "the result should be NULL if the VM op did not succeed");
   842     }
   844     // Give a warning if we seem to be looping forever.
   845     if ((QueuedAllocationWarningCount > 0) &&
   846         (try_count % QueuedAllocationWarningCount == 0)) {
   847       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
   848     }
   849   }
   851   ShouldNotReachHere();
   852   return NULL;
   853 }
   855 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   856                                            unsigned int *gc_count_before_ret) {
   857   // Make sure you read the note in attempt_allocation_humongous().
   859   assert_heap_not_locked_and_not_at_safepoint();
   860   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
   861          "be called for humongous allocation requests");
   863   // We should only get here after the first-level allocation attempt
   864   // (attempt_allocation()) failed to allocate.
   866   // We will loop until a) we manage to successfully perform the
   867   // allocation or b) we successfully schedule a collection which
   868   // fails to perform the allocation. b) is the only case when we'll
   869   // return NULL.
   870   HeapWord* result = NULL;
   871   for (int try_count = 1; /* we'll return */; try_count += 1) {
   872     bool should_try_gc;
   873     unsigned int gc_count_before;
   875     {
   876       MutexLockerEx x(Heap_lock);
   878       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
   879                                                       false /* bot_updates */);
   880       if (result != NULL) {
   881         return result;
   882       }
   884       // If we reach here, attempt_allocation_locked() above failed to
   885       // allocate a new region. So the mutator alloc region should be NULL.
   886       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
   888       if (GC_locker::is_active_and_needs_gc()) {
   889         if (g1_policy()->can_expand_young_list()) {
   890           result = _mutator_alloc_region.attempt_allocation_force(word_size,
   891                                                       false /* bot_updates */);
   892           if (result != NULL) {
   893             return result;
   894           }
   895         }
   896         should_try_gc = false;
   897       } else {
   898         // Read the GC count while still holding the Heap_lock.
   899         gc_count_before = SharedHeap::heap()->total_collections();
   900         should_try_gc = true;
   901       }
   902     }
   904     if (should_try_gc) {
   905       bool succeeded;
   906       result = do_collection_pause(word_size, gc_count_before, &succeeded);
   907       if (result != NULL) {
   908         assert(succeeded, "only way to get back a non-NULL result");
   909         return result;
   910       }
   912       if (succeeded) {
   913         // If we get here we successfully scheduled a collection which
   914         // failed to allocate. No point in trying to allocate
   915         // further. We'll just return NULL.
   916         MutexLockerEx x(Heap_lock);
   917         *gc_count_before_ret = SharedHeap::heap()->total_collections();
   918         return NULL;
   919       }
   920     } else {
   921       GC_locker::stall_until_clear();
   922     }
   924     // We can reach here if we were unsuccessful in scheduling a
   925     // collection (because another thread beat us to it) or if we were
   926     // stalled due to the GC locker. In either case we should retry the
   927     // allocation attempt in case another thread successfully
   928     // performed a collection and reclaimed enough space. We do the
   929     // first attempt (without holding the Heap_lock) here and the
   930     // follow-on attempt will be at the start of the next loop
   931     // iteration (after taking the Heap_lock).
   932     result = _mutator_alloc_region.attempt_allocation(word_size,
   933                                                       false /* bot_updates */);
   934     if (result != NULL) {
   935       return result;
   936     }
   938     // Give a warning if we seem to be looping forever.
   939     if ((QueuedAllocationWarningCount > 0) &&
   940         (try_count % QueuedAllocationWarningCount == 0)) {
   941       warning("G1CollectedHeap::attempt_allocation_slow() "
   942               "retries %d times", try_count);
   943     }
   944   }
   946   ShouldNotReachHere();
   947   return NULL;
   948 }
   950 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   951                                           unsigned int * gc_count_before_ret) {
   952   // The structure of this method has a lot of similarities to
   953   // attempt_allocation_slow(). The reason these two were not merged
   954   // into a single one is that such a method would require several "if
   955   // allocation is not humongous do this, otherwise do that"
   956   // conditional paths which would obscure its flow. In fact, an early
   957   // version of this code did use a unified method which was harder to
   958   // follow and, as a result, it had subtle bugs that were hard to
   959   // track down. So keeping these two methods separate allows each to
   960   // be more readable. It will be good to keep these two in sync as
   961   // much as possible.
   963   assert_heap_not_locked_and_not_at_safepoint();
   964   assert(isHumongous(word_size), "attempt_allocation_humongous() "
   965          "should only be called for humongous allocations");
   967   // We will loop until a) we manage to successfully perform the
   968   // allocation or b) we successfully schedule a collection which
   969   // fails to perform the allocation. b) is the only case when we'll
   970   // return NULL.
   971   HeapWord* result = NULL;
   972   for (int try_count = 1; /* we'll return */; try_count += 1) {
   973     bool should_try_gc;
   974     unsigned int gc_count_before;
   976     {
   977       MutexLockerEx x(Heap_lock);
   979       // Given that humongous objects are not allocated in young
   980       // regions, we'll first try to do the allocation without doing a
   981       // collection hoping that there's enough space in the heap.
   982       result = humongous_obj_allocate(word_size);
   983       if (result != NULL) {
   984         return result;
   985       }
   987       if (GC_locker::is_active_and_needs_gc()) {
   988         should_try_gc = false;
   989       } else {
   990         // Read the GC count while still holding the Heap_lock.
   991         gc_count_before = SharedHeap::heap()->total_collections();
   992         should_try_gc = true;
   993       }
   994     }
   996     if (should_try_gc) {
   997       // If we failed to allocate the humongous object, we should try to
   998       // do a collection pause (if we're allowed) in case it reclaims
   999       // enough space for the allocation to succeed after the pause.
  1001       bool succeeded;
  1002       result = do_collection_pause(word_size, gc_count_before, &succeeded);
  1003       if (result != NULL) {
  1004         assert(succeeded, "only way to get back a non-NULL result");
  1005         return result;
  1006       }
  1008       if (succeeded) {
  1009         // If we get here we successfully scheduled a collection which
  1010         // failed to allocate. No point in trying to allocate
  1011         // further. We'll just return NULL.
  1012         MutexLockerEx x(Heap_lock);
  1013         *gc_count_before_ret = SharedHeap::heap()->total_collections();
  1014         return NULL;
  1015       }
  1016     } else {
  1017       GC_locker::stall_until_clear();
  1018     }
  1020     // We can reach here if we were unsuccessful in scheduling a
  1021     // collection (because another thread beat us to it) or if we were
  1022     // stalled due to the GC locker. In either case we should retry the
  1023     // allocation attempt in case another thread successfully
  1024     // performed a collection and reclaimed enough space.  Give a
  1025     // warning if we seem to be looping forever.
  1027     if ((QueuedAllocationWarningCount > 0) &&
  1028         (try_count % QueuedAllocationWarningCount == 0)) {
  1029       warning("G1CollectedHeap::attempt_allocation_humongous() "
  1030               "retries %d times", try_count);
  1031     }
  1032   }
  1034   ShouldNotReachHere();
  1035   return NULL;
  1036 }
  1038 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
  1039                                        bool expect_null_mutator_alloc_region) {
  1040   assert_at_safepoint(true /* should_be_vm_thread */);
  1041   assert(_mutator_alloc_region.get() == NULL ||
  1042                                              !expect_null_mutator_alloc_region,
  1043          "the current alloc region was unexpectedly found to be non-NULL");
  1045   if (!isHumongous(word_size)) {
  1046     return _mutator_alloc_region.attempt_allocation_locked(word_size,
  1047                                                       false /* bot_updates */);
  1048   } else {
  1049     return humongous_obj_allocate(word_size);
  1050   }
  1052   ShouldNotReachHere();
  1053 }
  1055 void G1CollectedHeap::abandon_gc_alloc_regions() {
  1056   // first, make sure that the GC alloc region list is empty (it should!)
  1057   assert(_gc_alloc_region_list == NULL, "invariant");
  1058   release_gc_alloc_regions(true /* totally */);
  1059 }
  1061 class PostMCRemSetClearClosure: public HeapRegionClosure {
  1062   ModRefBarrierSet* _mr_bs;
  1063 public:
  1064   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  1065   bool doHeapRegion(HeapRegion* r) {
  1066     r->reset_gc_time_stamp();
  1067     if (r->continuesHumongous())
  1068       return false;
  1069     HeapRegionRemSet* hrrs = r->rem_set();
  1070     if (hrrs != NULL) hrrs->clear();
  1071     // You might think here that we could clear just the cards
  1072     // corresponding to the used region.  But no: if we leave a dirty card
  1073     // in a region we might allocate into, then it would prevent that card
  1074     // from being enqueued, and cause it to be missed.
  1075     // Re: the performance cost: we shouldn't be doing full GC anyway!
  1076     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
  1077     return false;
  1078   }
  1079 };
  1082 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  1083   ModRefBarrierSet* _mr_bs;
  1084 public:
  1085   PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  1086   bool doHeapRegion(HeapRegion* r) {
  1087     if (r->continuesHumongous()) return false;
  1088     if (r->used_region().word_size() != 0) {
  1089       _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
  1090     }
  1091     return false;
  1092   }
  1093 };
  1095 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  1096   G1CollectedHeap*   _g1h;
  1097   UpdateRSOopClosure _cl;
  1098   int                _worker_i;
  1099 public:
  1100   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
  1101     _cl(g1->g1_rem_set(), worker_i),
  1102     _worker_i(worker_i),
  1103     _g1h(g1)
  1104   { }
  1106   bool doHeapRegion(HeapRegion* r) {
  1107     if (!r->continuesHumongous()) {
  1108       _cl.set_from(r);
  1109       r->oop_iterate(&_cl);
  1110     }
  1111     return false;
  1112   }
  1113 };
  1115 class ParRebuildRSTask: public AbstractGangTask {
  1116   G1CollectedHeap* _g1;
  1117 public:
  1118   ParRebuildRSTask(G1CollectedHeap* g1)
  1119     : AbstractGangTask("ParRebuildRSTask"),
  1120       _g1(g1)
  1121   { }
  1123   void work(int i) {
  1124     RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
  1125     _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
  1126                                          HeapRegion::RebuildRSClaimValue);
  1127   }
  1128 };
  1130 bool G1CollectedHeap::do_collection(bool explicit_gc,
  1131                                     bool clear_all_soft_refs,
  1132                                     size_t word_size) {
  1133   assert_at_safepoint(true /* should_be_vm_thread */);
  1135   if (GC_locker::check_active_before_gc()) {
  1136     return false;
  1137   }
  1139   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1140   ResourceMark rm;
  1142   if (PrintHeapAtGC) {
  1143     Universe::print_heap_before_gc();
  1144   }
  1146   verify_region_sets_optional();
  1148   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1149                            collector_policy()->should_clear_all_soft_refs();
  1151   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
  1153   {
  1154     IsGCActiveMark x;
  1156     // Timing
  1157     bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
  1158     assert(!system_gc || explicit_gc, "invariant");
  1159     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  1160     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  1161     TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
  1162                 PrintGC, true, gclog_or_tty);
  1164     TraceMemoryManagerStats tms(true /* fullGC */);
  1166     double start = os::elapsedTime();
  1167     g1_policy()->record_full_collection_start();
  1169     wait_while_free_regions_coming();
  1170     append_secondary_free_list_if_not_empty_with_lock();
  1172     gc_prologue(true);
  1173     increment_total_collections(true /* full gc */);
  1175     size_t g1h_prev_used = used();
  1176     assert(used() == recalculate_used(), "Should be equal");
  1178     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  1179       HandleMark hm;  // Discard invalid handles created during verification
  1180       gclog_or_tty->print(" VerifyBeforeGC:");
  1181       prepare_for_verify();
  1182       Universe::verify(true);
  1183     }
  1185     COMPILER2_PRESENT(DerivedPointerTable::clear());
  1187     // We want to discover references, but not process them yet.
  1188     // This mode is disabled in
  1189     // instanceRefKlass::process_discovered_references if the
  1190     // generation does some collection work, or
  1191     // instanceRefKlass::enqueue_discovered_references if the
  1192     // generation returns without doing any work.
  1193     ref_processor()->disable_discovery();
  1194     ref_processor()->abandon_partial_discovery();
  1195     ref_processor()->verify_no_references_recorded();
  1197     // Abandon current iterations of concurrent marking and concurrent
  1198     // refinement, if any are in progress.
  1199     concurrent_mark()->abort();
  1201     // Make sure we'll choose a new allocation region afterwards.
  1202     release_mutator_alloc_region();
  1203     abandon_gc_alloc_regions();
  1204     g1_rem_set()->cleanupHRRS();
  1205     tear_down_region_lists();
  1207     // We may have added regions to the current incremental collection
  1208     // set between the last GC or pause and now. We need to clear the
  1209     // incremental collection set and then start rebuilding it afresh
  1210     // after this full GC.
  1211     abandon_collection_set(g1_policy()->inc_cset_head());
  1212     g1_policy()->clear_incremental_cset();
  1213     g1_policy()->stop_incremental_cset_building();
  1215     if (g1_policy()->in_young_gc_mode()) {
  1216       empty_young_list();
  1217       g1_policy()->set_full_young_gcs(true);
  1218     }
  1220     // See the comment in G1CollectedHeap::ref_processing_init() about
  1221     // how reference processing currently works in G1.
  1223     // Temporarily make reference _discovery_ single threaded (non-MT).
  1224     ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
  1226     // Temporarily make refs discovery atomic
  1227     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
  1229     // Temporarily clear _is_alive_non_header
  1230     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
  1232     ref_processor()->enable_discovery();
  1233     ref_processor()->setup_policy(do_clear_all_soft_refs);
  1235     // Do collection work
  1236     {
  1237       HandleMark hm;  // Discard invalid handles created during gc
  1238       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
  1239     }
  1240     assert(free_regions() == 0, "we should not have added any free regions");
  1241     rebuild_region_lists();
  1243     _summary_bytes_used = recalculate_used();
  1245     ref_processor()->enqueue_discovered_references();
  1247     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1249     MemoryService::track_memory_usage();
  1251     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  1252       HandleMark hm;  // Discard invalid handles created during verification
  1253       gclog_or_tty->print(" VerifyAfterGC:");
  1254       prepare_for_verify();
  1255       Universe::verify(false);
  1256     }
  1257     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  1259     reset_gc_time_stamp();
  1260     // Since everything potentially moved, we will clear all remembered
  1261     // sets, and clear all cards.  Later we will rebuild remembered
  1262     // sets. We will also reset the GC time stamps of the regions.
  1263     PostMCRemSetClearClosure rs_clear(mr_bs());
  1264     heap_region_iterate(&rs_clear);
  1266     // Resize the heap if necessary.
  1267     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1269     if (_cg1r->use_cache()) {
  1270       _cg1r->clear_and_record_card_counts();
  1271       _cg1r->clear_hot_cache();
  1272     }
  1274     // Rebuild remembered sets of all regions.
  1276     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1277       ParRebuildRSTask rebuild_rs_task(this);
  1278       assert(check_heap_region_claim_values(
  1279              HeapRegion::InitialClaimValue), "sanity check");
  1280       set_par_threads(workers()->total_workers());
  1281       workers()->run_task(&rebuild_rs_task);
  1282       set_par_threads(0);
  1283       assert(check_heap_region_claim_values(
  1284              HeapRegion::RebuildRSClaimValue), "sanity check");
  1285       reset_heap_region_claim_values();
  1286     } else {
  1287       RebuildRSOutOfRegionClosure rebuild_rs(this);
  1288       heap_region_iterate(&rebuild_rs);
  1289     }
  1291     if (PrintGC) {
  1292       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  1293     }
  1295     if (true) { // FIXME
  1296       // Ask the permanent generation to adjust size for full collections
  1297       perm()->compute_new_size();
  1298     }
  1300     // Start a new incremental collection set for the next pause
  1301     assert(g1_policy()->collection_set() == NULL, "must be");
  1302     g1_policy()->start_incremental_cset_building();
  1304     // Clear the _cset_fast_test bitmap in anticipation of adding
  1305     // regions to the incremental collection set for the next
  1306     // evacuation pause.
  1307     clear_cset_fast_test();
  1309     init_mutator_alloc_region();
  1311     double end = os::elapsedTime();
  1312     g1_policy()->record_full_collection_end();
  1314 #ifdef TRACESPINNING
  1315     ParallelTaskTerminator::print_termination_counts();
  1316 #endif
  1318     gc_epilogue(true);
  1320     // Discard all rset updates
  1321     JavaThread::dirty_card_queue_set().abandon_logs();
  1322     assert(!G1DeferredRSUpdate
  1323            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1324   }
  1326   if (g1_policy()->in_young_gc_mode()) {
  1327     _young_list->reset_sampled_info();
  1328     // At this point there should be no regions in the
  1329     // entire heap tagged as young.
  1330     assert( check_young_list_empty(true /* check_heap */),
  1331             "young list should be empty at this point");
  1332   }
  1334   // Update the number of full collections that have been completed.
  1335   increment_full_collections_completed(false /* concurrent */);
  1337   verify_region_sets_optional();
  1339   if (PrintHeapAtGC) {
  1340     Universe::print_heap_after_gc();
  1341   }
  1343   return true;
  1344 }
  1346 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1347   // do_collection() will return whether it succeeded in performing
  1348   // the GC. Currently, there is no facility on the
  1349   // do_full_collection() API to notify the caller that the collection
  1350   // did not succeed (e.g., because it was locked out by the GC
  1351   // locker). So, right now, we'll ignore the return value.
  1352   bool dummy = do_collection(true,                /* explicit_gc */
  1353                              clear_all_soft_refs,
  1354                              0                    /* word_size */);
  1357 // This code is mostly copied from TenuredGeneration.
  1358 void
  1359 G1CollectedHeap::
  1360 resize_if_necessary_after_full_collection(size_t word_size) {
  1361   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1363   // Include the current allocation, if any, and bytes that will be
  1364   // pre-allocated to support collections, as "used".
  1365   const size_t used_after_gc = used();
  1366   const size_t capacity_after_gc = capacity();
  1367   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1369   // This is enforced in arguments.cpp.
  1370   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1371          "otherwise the code below doesn't make sense");
  1373   // MinHeapFreeRatio and MaxHeapFreeRatio are integer percentages; convert them to fractions.
  1374   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1375   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1376   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1377   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1379   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1380   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1382   // We have to be careful here as these two calculations can overflow
  1383   // 32-bit size_t's.
  1384   double used_after_gc_d = (double) used_after_gc;
  1385   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1386   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
  1388   // Let's make sure that they are both under the max heap size, which
  1389   // by default will make them fit into a size_t.
  1390   double desired_capacity_upper_bound = (double) max_heap_size;
  1391   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1392                                     desired_capacity_upper_bound);
  1393   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1394                                     desired_capacity_upper_bound);
  1396   // We can now safely turn them into size_t's.
  1397   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1398   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
  1400   // This assert only makes sense here, before we adjust them
  1401   // with respect to the min and max heap size.
  1402   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1403          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1404                  "maximum_desired_capacity = "SIZE_FORMAT,
  1405                  minimum_desired_capacity, maximum_desired_capacity));
  1407   // Should not be greater than the heap max size. No need to adjust
  1408   // it with respect to the heap min size as it's a lower bound (i.e.,
  1409   // we'll try to make the capacity larger than it, not smaller).
  1410   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1411   // Should not be less than the heap min size. No need to adjust it
  1412   // with respect to the heap max size as it's an upper bound (i.e.,
  1413   // we'll try to make the capacity smaller than it, not greater).
  1414   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
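         // To summarize the sizing policy: the capacity is steered into the window
         //   [ used / (1 - MinHeapFreeRatio/100), used / (1 - MaxHeapFreeRatio/100) ]
         // and then clamped to the configured min/max heap size. For example, with
         // used = 600M, MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70, the window
         // is roughly [1000M, 2000M].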
  1416   if (PrintGC && Verbose) {
  1417     const double free_percentage =
  1418       (double) free_after_gc / (double) capacity_after_gc;
  1419     gclog_or_tty->print_cr("Computing new size after full GC ");
  1420     gclog_or_tty->print_cr("  "
  1421                            "  minimum_free_percentage: %6.2f",
  1422                            minimum_free_percentage);
  1423     gclog_or_tty->print_cr("  "
  1424                            "  maximum_free_percentage: %6.2f",
  1425                            maximum_free_percentage);
  1426     gclog_or_tty->print_cr("  "
  1427                            "  capacity: %6.1fK"
  1428                            "  minimum_desired_capacity: %6.1fK"
  1429                            "  maximum_desired_capacity: %6.1fK",
  1430                            (double) capacity_after_gc / (double) K,
  1431                            (double) minimum_desired_capacity / (double) K,
  1432                            (double) maximum_desired_capacity / (double) K);
  1433     gclog_or_tty->print_cr("  "
  1434                            "  free_after_gc: %6.1fK"
  1435                            "  used_after_gc: %6.1fK",
  1436                            (double) free_after_gc / (double) K,
  1437                            (double) used_after_gc / (double) K);
  1438     gclog_or_tty->print_cr("  "
  1439                            "   free_percentage: %6.2f",
  1440                            free_percentage);
  1442   if (capacity_after_gc < minimum_desired_capacity) {
  1443     // Don't expand unless it's significant
  1444     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1445     if (expand(expand_bytes)) {
  1446       if (PrintGC && Verbose) {
  1447         gclog_or_tty->print_cr("  "
  1448                                "  expanding:"
  1449                                "  max_heap_size: %6.1fK"
  1450                                "  minimum_desired_capacity: %6.1fK"
  1451                                "  expand_bytes: %6.1fK",
  1452                                (double) max_heap_size / (double) K,
  1453                                (double) minimum_desired_capacity / (double) K,
  1454                                (double) expand_bytes / (double) K);
  1458     // No expansion, now see if we want to shrink
  1459   } else if (capacity_after_gc > maximum_desired_capacity) {
  1460     // Capacity too large, compute shrinking size
  1461     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1462     shrink(shrink_bytes);
  1463     if (PrintGC && Verbose) {
  1464       gclog_or_tty->print_cr("  "
  1465                              "  shrinking:"
  1466                              "  min_heap_size: %6.1fK"
  1467                              "  maximum_desired_capacity: %6.1fK"
  1468                              "  shrink_bytes: %6.1fK",
  1469                              (double) min_heap_size / (double) K,
  1470                              (double) maximum_desired_capacity / (double) K,
  1471                              (double) shrink_bytes / (double) K);
  1477 HeapWord*
  1478 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1479                                            bool* succeeded) {
  1480   assert_at_safepoint(true /* should_be_vm_thread */);
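         // The code below is a retry ladder: (1) try the allocation as-is,
         // (2) expand the heap and retry, (3) do a full GC that keeps soft
         // references and retry, (4) do a full GC that clears soft references
         // and retry, and only then give up. *succeeded is set to false only
         // when a required full GC could not be run (e.g. it was locked out
         // by the GC locker).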
  1482   *succeeded = true;
  1483   // Let's attempt the allocation first.
  1484   HeapWord* result =
  1485     attempt_allocation_at_safepoint(word_size,
  1486                                  false /* expect_null_mutator_alloc_region */);
  1487   if (result != NULL) {
  1488     assert(*succeeded, "sanity");
  1489     return result;
  1492   // In a G1 heap, we're supposed to keep allocation from failing by
  1493   // incremental pauses.  Therefore, at least for now, we'll favor
  1494   // expansion over collection.  (This might change in the future if we can
  1495   // do something smarter than full collection to satisfy a failed alloc.)
  1496   result = expand_and_allocate(word_size);
  1497   if (result != NULL) {
  1498     assert(*succeeded, "sanity");
  1499     return result;
  1502   // Expansion didn't work, we'll try to do a Full GC.
  1503   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1504                                     false, /* clear_all_soft_refs */
  1505                                     word_size);
  1506   if (!gc_succeeded) {
  1507     *succeeded = false;
  1508     return NULL;
  1511   // Retry the allocation
  1512   result = attempt_allocation_at_safepoint(word_size,
  1513                                   true /* expect_null_mutator_alloc_region */);
  1514   if (result != NULL) {
  1515     assert(*succeeded, "sanity");
  1516     return result;
  1519   // Then, try a Full GC that will collect all soft references.
  1520   gc_succeeded = do_collection(false, /* explicit_gc */
  1521                                true,  /* clear_all_soft_refs */
  1522                                word_size);
  1523   if (!gc_succeeded) {
  1524     *succeeded = false;
  1525     return NULL;
  1528   // Retry the allocation once more
  1529   result = attempt_allocation_at_safepoint(word_size,
  1530                                   true /* expect_null_mutator_alloc_region */);
  1531   if (result != NULL) {
  1532     assert(*succeeded, "sanity");
  1533     return result;
  1536   assert(!collector_policy()->should_clear_all_soft_refs(),
  1537          "Flag should have been handled and cleared prior to this point");
  1539   // What else?  We might try synchronous finalization later.  If the total
  1540   // space available is large enough for the allocation, then a more
  1541   // complete compaction phase than we've tried so far might be
  1542   // appropriate.
  1543   assert(*succeeded, "sanity");
  1544   return NULL;
  1547 // Attempts to expand the heap sufficiently to support an allocation
  1548 // of the given "word_size".  If successful, performs the allocation
  1549 // and returns the address of the allocated block; otherwise returns
  1550 // NULL.
  1552 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1553   assert_at_safepoint(true /* should_be_vm_thread */);
  1555   verify_region_sets_optional();
  1557   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
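         // word_size is in HeapWords, so convert it to bytes; expand by at
         // least MinHeapDeltaBytes so that a small request still grows the
         // heap by a useful amount. expand() below rounds the request up to
         // whole heap regions.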
  1558   if (expand(expand_bytes)) {
  1559     verify_region_sets_optional();
  1560     return attempt_allocation_at_safepoint(word_size,
  1561                                  false /* expect_null_mutator_alloc_region */);
  1563   return NULL;
  1566 bool G1CollectedHeap::expand(size_t expand_bytes) {
  1567   size_t old_mem_size = _g1_storage.committed_size();
  1568   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  1569   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1570                                        HeapRegion::GrainBytes);
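         // The heap is committed in whole regions: the request is rounded up
         // first to the OS page size and then to HeapRegion::GrainBytes, so
         // even a small expansion request commits at least one full region.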
  1572   if (Verbose && PrintGC) {
  1573     gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
  1574                            old_mem_size/K, aligned_expand_bytes/K);
  1577   HeapWord* old_end = (HeapWord*)_g1_storage.high();
  1578   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
  1579   if (successful) {
  1580     HeapWord* new_end = (HeapWord*)_g1_storage.high();
  1582     // Expand the committed region.
  1583     _g1_committed.set_end(new_end);
  1585     // Tell the cardtable about the expansion.
  1586     Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1588     // And the offset table as well.
  1589     _bot_shared->resize(_g1_committed.word_size());
  1591     expand_bytes = aligned_expand_bytes;
  1592     HeapWord* base = old_end;
  1594     // Create the heap regions for [old_end, new_end)
  1595     while (expand_bytes > 0) {
  1596       HeapWord* high = base + HeapRegion::GrainWords;
  1598       // Create a new HeapRegion.
  1599       MemRegion mr(base, high);
  1600       bool is_zeroed = !_g1_max_committed.contains(base);
  1601       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
  1603       // Add it to the HeapRegionSeq.
  1604       _hrs->insert(hr);
  1605       _free_list.add_as_tail(hr);
  1607       // And we used up an expansion region to create it.
  1608       _expansion_regions--;
  1610       expand_bytes -= HeapRegion::GrainBytes;
  1611       base += HeapRegion::GrainWords;
  1613     assert(base == new_end, "sanity");
  1615     // Now update max_committed if necessary.
  1616     _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
  1618   } else {
  1619     // The expansion of the virtual storage space was unsuccessful.
  1620     // Let's see if it was because we ran out of swap.
  1621     if (G1ExitOnExpansionFailure &&
  1622         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
  1623       // We had head room...
  1624       vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
  1628   if (Verbose && PrintGC) {
  1629     size_t new_mem_size = _g1_storage.committed_size();
  1630     gclog_or_tty->print_cr("...%s, expanded to %ldK",
  1631                            (successful ? "Successful" : "Failed"),
  1632                            new_mem_size/K);
  1634   return successful;
  1637 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
  1639   size_t old_mem_size = _g1_storage.committed_size();
  1640   size_t aligned_shrink_bytes =
  1641     ReservedSpace::page_align_size_down(shrink_bytes);
  1642   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1643                                          HeapRegion::GrainBytes);
  1644   size_t num_regions_deleted = 0;
  1645   MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
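         // shrink_by() returns the MemRegion at the top of the committed
         // space that is being given up (the asserts below check that it
         // abuts the current high-water mark) and reports how many regions
         // it removed from the sequence.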
  1647   assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1648   if (mr.byte_size() > 0)
  1649     _g1_storage.shrink_by(mr.byte_size());
  1650   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1652   _g1_committed.set_end(mr.start());
  1653   _expansion_regions += num_regions_deleted;
  1655   // Tell the cardtable about it.
  1656   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1658   // And the offset table as well.
  1659   _bot_shared->resize(_g1_committed.word_size());
  1661   HeapRegionRemSet::shrink_heap(n_regions());
  1663   if (Verbose && PrintGC) {
  1664     size_t new_mem_size = _g1_storage.committed_size();
  1665     gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
  1666                            old_mem_size/K, aligned_shrink_bytes/K,
  1667                            new_mem_size/K);
  1671 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1672   verify_region_sets_optional();
  1674   release_gc_alloc_regions(true /* totally */);
  1675   // Instead of tearing down / rebuilding the free lists here, we
  1676   // could use the remove_all_pending() method on free_list to
  1677   // remove only the ones that we need to remove.
  1678   tear_down_region_lists();  // We will rebuild them in a moment.
  1679   shrink_helper(shrink_bytes);
  1680   rebuild_region_lists();
  1682   verify_region_sets_optional();
  1685 // Public methods.
  1687 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1688 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1689 #endif // _MSC_VER
  1692 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1693   SharedHeap(policy_),
  1694   _g1_policy(policy_),
  1695   _dirty_card_queue_set(false),
  1696   _into_cset_dirty_card_queue_set(false),
  1697   _is_alive_closure(this),
  1698   _ref_processor(NULL),
  1699   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1700   _bot_shared(NULL),
  1701   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1702   _evac_failure_scan_stack(NULL),
  1703   _mark_in_progress(false),
  1704   _cg1r(NULL), _summary_bytes_used(0),
  1705   _refine_cte_cl(NULL),
  1706   _full_collection(false),
  1707   _free_list("Master Free List"),
  1708   _secondary_free_list("Secondary Free List"),
  1709   _humongous_set("Master Humongous Set"),
  1710   _free_regions_coming(false),
  1711   _young_list(new YoungList(this)),
  1712   _gc_time_stamp(0),
  1713   _surviving_young_words(NULL),
  1714   _full_collections_completed(0),
  1715   _in_cset_fast_test(NULL),
  1716   _in_cset_fast_test_base(NULL),
  1717   _dirty_cards_region_list(NULL) {
  1718   _g1h = this; // To catch bugs.
  1719   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1720     vm_exit_during_initialization("Failed necessary allocation.");
  1723   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
  1725   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1726   _task_queues = new RefToScanQueueSet(n_queues);
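         // One reference-to-scan queue per GC worker thread (and at least one
         // even if ParallelGCThreads is 0); the individual queues are created
         // and registered in the loop further down.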
  1728   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1729   assert(n_rem_sets > 0, "Invariant.");
  1731   HeapRegionRemSetIterator** iter_arr =
  1732     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  1733   for (int i = 0; i < n_queues; i++) {
  1734     iter_arr[i] = new HeapRegionRemSetIterator();
  1736   _rem_set_iterator = iter_arr;
  1738   for (int i = 0; i < n_queues; i++) {
  1739     RefToScanQueue* q = new RefToScanQueue();
  1740     q->initialize();
  1741     _task_queues->register_queue(i, q);
  1744   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  1745     _gc_alloc_regions[ap]          = NULL;
  1746     _gc_alloc_region_counts[ap]    = 0;
  1747     _retained_gc_alloc_regions[ap] = NULL;
  1748     // by default, we do not retain a GC alloc region for each ap;
  1749     // we'll override this, when appropriate, below
  1750     _retain_gc_alloc_region[ap]    = false;
  1753   // We will try to remember the last half-full tenured region we
  1754   // allocated to at the end of a collection so that we can re-use it
  1755   // during the next collection.
  1756   _retain_gc_alloc_region[GCAllocForTenured]  = true;
  1758   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1761 jint G1CollectedHeap::initialize() {
  1762   CollectedHeap::pre_initialize();
  1763   os::enable_vtime();
  1765   // Necessary to satisfy locking discipline assertions.
  1767   MutexLocker x(Heap_lock);
  1769   // While there are no constraints in the GC code that HeapWordSize
  1770   // be any particular value, there are multiple other areas in the
  1771   // system which assume HeapWordSize == wordSize (e.g. oop->object_size in some
  1772   // cases incorrectly returns the size in wordSize units rather than
  1773   // HeapWordSize).
  1774   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1776   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1777   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1779   // Ensure that the sizes are properly aligned.
  1780   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1781   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1783   _cg1r = new ConcurrentG1Refine();
  1785   // Reserve the maximum.
  1786   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1787   // Includes the perm-gen.
  1789   const size_t total_reserved = max_byte_size + pgs->max_size();
  1790   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  1792   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
  1793                         HeapRegion::GrainBytes,
  1794                         UseLargePages, addr);
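         // With compressed oops we try progressively less favourable layouts:
         // first an address that allows unscaled narrow oops, then one that
         // allows zero-based narrow oops, and finally an arbitrary (heap-based)
         // address. Each fallback below re-reserves the space at the new address.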
  1796   if (UseCompressedOops) {
  1797     if (addr != NULL && !heap_rs.is_reserved()) {
  1798       // Failed to reserve at specified address - the requested memory
  1799       // region is already taken, for example, by the 'java' launcher.
  1800       // Try again to reserve the heap at a higher address.
  1801       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  1802       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  1803                              UseLargePages, addr);
  1804       if (addr != NULL && !heap_rs0.is_reserved()) {
  1805         // Failed to reserve at specified address again - give up.
  1806         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  1807         assert(addr == NULL, "");
  1808         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  1809                                UseLargePages, addr);
  1810         heap_rs = heap_rs1;
  1811       } else {
  1812         heap_rs = heap_rs0;
  1817   if (!heap_rs.is_reserved()) {
  1818     vm_exit_during_initialization("Could not reserve enough space for object heap");
  1819     return JNI_ENOMEM;
  1822   // It is important to do this in a way such that concurrent readers can't
  1823   // temporarily think something is in the heap.  (I've actually seen this
  1824   // happen in asserts: DLD.)
  1825   _reserved.set_word_size(0);
  1826   _reserved.set_start((HeapWord*)heap_rs.base());
  1827   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  1829   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  1831   // Create the gen rem set (and barrier set) for the entire reserved region.
  1832   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  1833   set_barrier_set(rem_set()->bs());
  1834   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  1835     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  1836   } else {
  1837     vm_exit_during_initialization("G1 requires a mod ref bs.");
  1838     return JNI_ENOMEM;
  1841   // Also create a G1 rem set.
  1842   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  1843     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  1844   } else {
  1845     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  1846     return JNI_ENOMEM;
  1849   // Carve out the G1 part of the heap.
  1851   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  1852   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  1853                            g1_rs.size()/HeapWordSize);
  1854   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  1856   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  1858   _g1_storage.initialize(g1_rs, 0);
  1859   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  1860   _g1_max_committed = _g1_committed;
  1861   _hrs = new HeapRegionSeq(_expansion_regions);
  1862   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  1864   // 6843694 - ensure that the maximum region index can fit
  1865   // in the remembered set structures.
  1866   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  1867   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
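         // The expression above yields the largest non-negative value that fits
         // in a signed integer of RegionIdx_t's width; with a 16-bit RegionIdx_t,
         // for example, that is 2^15 - 1 = 32767, which caps the region count.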
  1869   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  1870   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  1871   guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
  1872             "too many cards per region");
  1874   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
  1876   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  1877                                              heap_word_size(init_byte_size));
  1879   _g1h = this;
  1881   _in_cset_fast_test_length = max_regions();
  1882   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  1884   // We're biasing _in_cset_fast_test to avoid subtracting the
  1885   // beginning of the heap every time we want to index; basically
  1886   // it's the same as what we do with the card table.
  1887   _in_cset_fast_test = _in_cset_fast_test_base -
  1888                ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  1890   // Clear the _cset_fast_test bitmap in anticipation of adding
  1891   // regions to the incremental collection set for the first
  1892   // evacuation pause.
  1893   clear_cset_fast_test();
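         // With the bias applied above, collection set membership of an address
         // can be tested with a single load, e.g. (illustrative only):
         //   bool in_cset = _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes];
         // which is equivalent to indexing _in_cset_fast_test_base by the
         // region index of addr.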
  1895   // Create the ConcurrentMark data structure and thread.
  1896   // (Must do this late, so that "max_regions" is defined.)
  1897   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  1898   _cmThread = _cm->cmThread();
  1900   // Initialize the from_card cache structure of HeapRegionRemSet.
  1901   HeapRegionRemSet::init_heap(max_regions());
  1903   // Now expand into the initial heap size.
  1904   if (!expand(init_byte_size)) {
  1905     vm_exit_during_initialization("Failed to allocate initial heap.");
  1906     return JNI_ENOMEM;
  1909   // Perform any initialization actions delegated to the policy.
  1910   g1_policy()->init();
  1912   g1_policy()->note_start_of_mark_thread();
  1914   _refine_cte_cl =
  1915     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  1916                                     g1_rem_set(),
  1917                                     concurrent_g1_refine());
  1918   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  1920   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  1921                                                SATB_Q_FL_lock,
  1922                                                G1SATBProcessCompletedThreshold,
  1923                                                Shared_SATB_Q_lock);
  1925   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1926                                                 DirtyCardQ_FL_lock,
  1927                                                 concurrent_g1_refine()->yellow_zone(),
  1928                                                 concurrent_g1_refine()->red_zone(),
  1929                                                 Shared_DirtyCardQ_lock);
  1931   if (G1DeferredRSUpdate) {
  1932     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1933                                       DirtyCardQ_FL_lock,
  1934                                       -1, // never trigger processing
  1935                                       -1, // no limit on length
  1936                                       Shared_DirtyCardQ_lock,
  1937                                       &JavaThread::dirty_card_queue_set());
  1940   // Initialize the card queue set used to hold cards containing
  1941   // references into the collection set.
  1942   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
  1943                                              DirtyCardQ_FL_lock,
  1944                                              -1, // never trigger processing
  1945                                              -1, // no limit on length
  1946                                              Shared_DirtyCardQ_lock,
  1947                                              &JavaThread::dirty_card_queue_set());
  1949   // In case we're keeping closure specialization stats, initialize those
  1950   // counts and that mechanism.
  1951   SpecializationStats::clear();
  1953   _gc_alloc_region_list = NULL;
  1955   // Do later initialization work for concurrent refinement.
  1956   _cg1r->init();
  1958   // Here we allocate the dummy full region that is required by the
  1959   // G1AllocRegion class. If we don't pass an address in the reserved
  1960   // space here, lots of asserts fire.
  1961   MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
  1962   HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
  1963   // We'll re-use the same region whether the alloc region will
  1964   // require BOT updates or not and, if it doesn't, then a non-young
  1965   // region will complain that it cannot support allocations without
  1966   // BOT updates. So we'll tag the dummy region as young to avoid that.
  1967   dummy_region->set_young();
  1968   // Make sure it's full.
  1969   dummy_region->set_top(dummy_region->end());
  1970   G1AllocRegion::setup(this, dummy_region);
  1972   init_mutator_alloc_region();
  1974   return JNI_OK;
  1977 void G1CollectedHeap::ref_processing_init() {
  1978   // Reference processing in G1 currently works as follows:
  1979   //
  1980   // * There is only one reference processor instance that
  1981   //   'spans' the entire heap. It is created by the code
  1982   //   below.
  1983   // * Reference discovery is not enabled during an incremental
  1984   //   pause (see 6484982).
  1985   // * Discovered refs are not enqueued nor are they processed
  1986   //   during an incremental pause (see 6484982).
  1987   // * Reference discovery is enabled at initial marking.
  1988   // * Reference discovery is disabled and the discovered
  1989   //   references are processed, etc., during remarking.
  1990   // * Reference discovery is MT (see below).
  1991   // * Reference discovery requires a barrier (see below).
  1992   // * Reference processing is currently not MT (see 6608385).
  1993   // * A full GC enables (non-MT) reference discovery and
  1994   //   processes any discovered references.
  1996   SharedHeap::ref_processing_init();
  1997   MemRegion mr = reserved_region();
  1998   _ref_processor =
  1999     new ReferenceProcessor(mr,    // span
  2000                            ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
  2001                            (int) ParallelGCThreads,   // degree of mt processing
  2002                            ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
  2003                            (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
  2004                            false,                     // Reference discovery is not atomic
  2005                            &_is_alive_closure,        // is alive closure for efficiency
  2006                            true);                     // Setting next fields of discovered
  2007                                                       // lists requires a barrier.
  2010 size_t G1CollectedHeap::capacity() const {
  2011   return _g1_committed.byte_size();
  2014 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2015                                                  DirtyCardQueue* into_cset_dcq,
  2016                                                  bool concurrent,
  2017                                                  int worker_i) {
  2018   // Clean cards in the hot card cache
  2019   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
  2021   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2022   int n_completed_buffers = 0;
  2023   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2024     n_completed_buffers++;
  2026   g1_policy()->record_update_rs_processed_buffers(worker_i,
  2027                                                   (double) n_completed_buffers);
  2028   dcqs.clear_n_completed_buffers();
  2029   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2033 // Computes the sum of the storage used by the various regions.
  2035 size_t G1CollectedHeap::used() const {
  2036   assert(Heap_lock->owner() != NULL,
  2037          "Should be owned on this thread's behalf.");
  2038   size_t result = _summary_bytes_used;
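         // _summary_bytes_used does not include the region that mutators are
         // currently allocating into, so the active allocation region's usage
         // (if there is one) is added separately below.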
  2039   // Read only once in case it is set to NULL concurrently
  2040   HeapRegion* hr = _mutator_alloc_region.get();
  2041   if (hr != NULL)
  2042     result += hr->used();
  2043   return result;
  2046 size_t G1CollectedHeap::used_unlocked() const {
  2047   size_t result = _summary_bytes_used;
  2048   return result;
  2051 class SumUsedClosure: public HeapRegionClosure {
  2052   size_t _used;
  2053 public:
  2054   SumUsedClosure() : _used(0) {}
  2055   bool doHeapRegion(HeapRegion* r) {
  2056     if (!r->continuesHumongous()) {
  2057       _used += r->used();
  2059     return false;
  2061   size_t result() { return _used; }
  2062 };
  2064 size_t G1CollectedHeap::recalculate_used() const {
  2065   SumUsedClosure blk;
  2066   _hrs->iterate(&blk);
  2067   return blk.result();
  2070 #ifndef PRODUCT
  2071 class SumUsedRegionsClosure: public HeapRegionClosure {
  2072   size_t _num;
  2073 public:
  2074   SumUsedRegionsClosure() : _num(0) {}
  2075   bool doHeapRegion(HeapRegion* r) {
  2076     if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
  2077       _num += 1;
  2079     return false;
  2081   size_t result() { return _num; }
  2082 };
  2084 size_t G1CollectedHeap::recalculate_used_regions() const {
  2085   SumUsedRegionsClosure blk;
  2086   _hrs->iterate(&blk);
  2087   return blk.result();
  2089 #endif // PRODUCT
  2091 size_t G1CollectedHeap::unsafe_max_alloc() {
  2092   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2093   // otherwise, is there space in the current allocation region?
  2095   // We need to store the current allocation region in a local variable
  2096   // here. The problem is that this method doesn't take any locks and
  2097   // there may be other threads which overwrite the current allocation
  2098   // region field. attempt_allocation(), for example, sets it to NULL
  2099   // and this can happen *after* the NULL check here but before the call
  2100   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2101   // to be a problem in the optimized build, since the two loads of the
  2102   // current allocation region field are optimized away.
  2103   HeapRegion* hr = _mutator_alloc_region.get();
  2104   if (hr == NULL) {
  2105     return 0;
  2107   return hr->free();
  2110 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
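         // A "concurrent full GC" here is an initial-mark evacuation pause that
         // starts a concurrent marking cycle, used instead of a stop-the-world
         // full collection when the corresponding flag requests it:
         // GCLockerInvokesConcurrent for GC-locker-induced GCs and
         // ExplicitGCInvokesConcurrent for System.gc().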
  2111   return
  2112     ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
  2113      (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  2116 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
  2117   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2119   // We assume that if concurrent == true, then the caller is a
  2120   // concurrent thread that has joined the Suspendible Thread
  2121   // Set. If there's ever a cheap way to check this, we should add an
  2122   // assert here.
  2124   // We have already incremented _total_full_collections at the start
  2125   // of the GC, so total_full_collections() represents how many full
  2126   // collections have been started.
  2127   unsigned int full_collections_started = total_full_collections();
  2129   // Given that this method is called at the end of a Full GC or of a
  2130   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2131   // interrupt a concurrent cycle), the number of full collections
  2132   // completed should be either one (in the case where there was no
  2133   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2134   // behind the number of full collections started.
  2136   // This is the case for the inner caller, i.e. a Full GC.
  2137   assert(concurrent ||
  2138          (full_collections_started == _full_collections_completed + 1) ||
  2139          (full_collections_started == _full_collections_completed + 2),
  2140          err_msg("for inner caller (Full GC): full_collections_started = %u "
  2141                  "is inconsistent with _full_collections_completed = %u",
  2142                  full_collections_started, _full_collections_completed));
  2144   // This is the case for the outer caller, i.e. the concurrent cycle.
  2145   assert(!concurrent ||
  2146          (full_collections_started == _full_collections_completed + 1),
  2147          err_msg("for outer caller (concurrent cycle): "
  2148                  "full_collections_started = %u "
  2149                  "is inconsistent with _full_collections_completed = %u",
  2150                  full_collections_started, _full_collections_completed));
  2152   _full_collections_completed += 1;
  2154   // We need to clear the "in_progress" flag in the CM thread before
  2155   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  2156   // is set) so that if a waiter requests another System.gc() it doesn't
  2157   // incorrectly see that a marking cycle is still in progress.
  2158   if (concurrent) {
  2159     _cmThread->clear_in_progress();
  2162   // This notify_all() will ensure that a thread that called
  2163   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
  2164   // and is waiting for a full GC to finish will be woken up. It is
  2165   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2166   FullGCCount_lock->notify_all();
  2169 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  2170   assert_at_safepoint(true /* should_be_vm_thread */);
  2171   GCCauseSetter gcs(this, cause);
  2172   switch (cause) {
  2173     case GCCause::_heap_inspection:
  2174     case GCCause::_heap_dump: {
  2175       HandleMark hm;
  2176       do_full_collection(false);         // don't clear all soft refs
  2177       break;
  2179     default: // XXX FIX ME
  2180       ShouldNotReachHere(); // Unexpected use of this function
  2184 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2185   // The caller doesn't have the Heap_lock
  2186   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  2188   unsigned int gc_count_before;
  2189   unsigned int full_gc_count_before;
  2191     MutexLocker ml(Heap_lock);
  2193     // Read the GC count while holding the Heap_lock
  2194     gc_count_before = SharedHeap::heap()->total_collections();
  2195     full_gc_count_before = SharedHeap::heap()->total_full_collections();
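           // The counts are captured while holding the Heap_lock so that the
           // VM operation scheduled below can tell whether another thread's GC
           // has already satisfied this request in the meantime.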
  2198   if (should_do_concurrent_full_gc(cause)) {
  2199     // Schedule an initial-mark evacuation pause that will start a
  2200     // concurrent cycle. We're setting word_size to 0 which means that
  2201     // we are not requesting a post-GC allocation.
  2202     VM_G1IncCollectionPause op(gc_count_before,
  2203                                0,     /* word_size */
  2204                                true,  /* should_initiate_conc_mark */
  2205                                g1_policy()->max_pause_time_ms(),
  2206                                cause);
  2207     VMThread::execute(&op);
  2208   } else {
  2209     if (cause == GCCause::_gc_locker
  2210         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2212       // Schedule a standard evacuation pause. We're setting word_size
  2213       // to 0 which means that we are not requesting a post-GC allocation.
  2214       VM_G1IncCollectionPause op(gc_count_before,
  2215                                  0,     /* word_size */
  2216                                  false, /* should_initiate_conc_mark */
  2217                                  g1_policy()->max_pause_time_ms(),
  2218                                  cause);
  2219       VMThread::execute(&op);
  2220     } else {
  2221       // Schedule a Full GC.
  2222       VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
  2223       VMThread::execute(&op);
  2228 bool G1CollectedHeap::is_in(const void* p) const {
  2229   if (_g1_committed.contains(p)) {
  2230     HeapRegion* hr = _hrs->addr_to_region(p);
  2231     return hr->is_in(p);
  2232   } else {
  2233     return _perm_gen->as_gen()->is_in(p);
  2237 // Iteration functions.
  2239 // Iterates an OopClosure over all ref-containing fields of objects
  2240 // within a HeapRegion.
  2242 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2243   MemRegion _mr;
  2244   OopClosure* _cl;
  2245 public:
  2246   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  2247     : _mr(mr), _cl(cl) {}
  2248   bool doHeapRegion(HeapRegion* r) {
  2249     if (! r->continuesHumongous()) {
  2250       r->oop_iterate(_cl);
  2252     return false;
  2254 };
  2256 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  2257   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2258   _hrs->iterate(&blk);
  2259   if (do_perm) {
  2260     perm_gen()->oop_iterate(cl);
  2264 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  2265   IterateOopClosureRegionClosure blk(mr, cl);
  2266   _hrs->iterate(&blk);
  2267   if (do_perm) {
  2268     perm_gen()->oop_iterate(cl);
  2272 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2274 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2275   ObjectClosure* _cl;
  2276 public:
  2277   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2278   bool doHeapRegion(HeapRegion* r) {
  2279     if (! r->continuesHumongous()) {
  2280       r->object_iterate(_cl);
  2282     return false;
  2284 };
  2286 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  2287   IterateObjectClosureRegionClosure blk(cl);
  2288   _hrs->iterate(&blk);
  2289   if (do_perm) {
  2290     perm_gen()->object_iterate(cl);
  2294 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  2295   // FIXME: is this right?
  2296   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  2299 // Calls a SpaceClosure on a HeapRegion.
  2301 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2302   SpaceClosure* _cl;
  2303 public:
  2304   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2305   bool doHeapRegion(HeapRegion* r) {
  2306     _cl->do_space(r);
  2307     return false;
  2309 };
  2311 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2312   SpaceClosureRegionClosure blk(cl);
  2313   _hrs->iterate(&blk);
  2316 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  2317   _hrs->iterate(cl);
  2320 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  2321                                                HeapRegionClosure* cl) {
  2322   _hrs->iterate_from(r, cl);
  2325 void
  2326 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  2327   _hrs->iterate_from(idx, cl);
  2330 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
  2332 void
  2333 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2334                                                  int worker,
  2335                                                  jint claim_value) {
  2336   const size_t regions = n_regions();
  2337   const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  2338   // try to spread out the starting points of the workers
  2339   const size_t start_index = regions / worker_num * (size_t) worker;
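         // For example, with 4 workers and 100 regions, worker 0 starts its
         // scan at region 0, worker 1 at region 25, worker 2 at region 50 and
         // worker 3 at region 75, wrapping around modulo the region count.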
  2341   // each worker will actually look at all regions
  2342   for (size_t count = 0; count < regions; ++count) {
  2343     const size_t index = (start_index + count) % regions;
  2344     assert(0 <= index && index < regions, "sanity");
  2345     HeapRegion* r = region_at(index);
  2346     // we'll ignore "continues humongous" regions (we'll process them
  2347     // when we come across their corresponding "start humongous"
  2348     // region) and regions already claimed
  2349     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2350       continue;
  2352     // OK, try to claim it
  2353     if (r->claimHeapRegion(claim_value)) {
  2354       // success!
  2355       assert(!r->continuesHumongous(), "sanity");
  2356       if (r->startsHumongous()) {
  2357         // If the region is "starts humongous" we'll iterate over its
  2358         // "continues humongous" regions first. The order is important:
  2359         // in one case, calling the
  2360         // closure on the "starts humongous" region might de-allocate
  2361         // and clear all its "continues humongous" regions and, as a
  2362         // result, we might end up processing them twice. So, we'll do
  2363         // them first (notice: most closures will ignore them anyway) and
  2364         // then we'll do the "starts humongous" region.
  2365         for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  2366           HeapRegion* chr = region_at(ch_index);
  2368           // if the region has already been claimed or it's not
  2369           // "continues humongous" we're done
  2370           if (chr->claim_value() == claim_value ||
  2371               !chr->continuesHumongous()) {
  2372             break;
  2375           // No one should have claimed it directly. We can assume this
  2376           // given that we claimed its "starts humongous" region.
  2377           assert(chr->claim_value() != claim_value, "sanity");
  2378           assert(chr->humongous_start_region() == r, "sanity");
  2380           if (chr->claimHeapRegion(claim_value)) {
  2381             // we should always be able to claim it; no one else should
  2382             // be trying to claim this region
  2384             bool res2 = cl->doHeapRegion(chr);
  2385             assert(!res2, "Should not abort");
  2387             // Right now, this holds (i.e., no closure that actually
  2388             // does something with "continues humongous" regions
  2389             // clears them). We might have to weaken it in the future,
  2390             // but let's leave these two asserts here for extra safety.
  2391             assert(chr->continuesHumongous(), "should still be the case");
  2392             assert(chr->humongous_start_region() == r, "sanity");
  2393           } else {
  2394             guarantee(false, "we should not reach here");
  2399       assert(!r->continuesHumongous(), "sanity");
  2400       bool res = cl->doHeapRegion(r);
  2401       assert(!res, "Should not abort");
  2406 class ResetClaimValuesClosure: public HeapRegionClosure {
  2407 public:
  2408   bool doHeapRegion(HeapRegion* r) {
  2409     r->set_claim_value(HeapRegion::InitialClaimValue);
  2410     return false;
  2412 };
  2414 void
  2415 G1CollectedHeap::reset_heap_region_claim_values() {
  2416   ResetClaimValuesClosure blk;
  2417   heap_region_iterate(&blk);
  2420 #ifdef ASSERT
  2421 // This checks whether all regions in the heap have the correct claim
  2422 // value. We also piggy-back on it a check that the
  2423 // humongous_start_region() information on "continues humongous"
  2424 // regions is correct.
  2426 class CheckClaimValuesClosure : public HeapRegionClosure {
  2427 private:
  2428   jint _claim_value;
  2429   size_t _failures;
  2430   HeapRegion* _sh_region;
  2431 public:
  2432   CheckClaimValuesClosure(jint claim_value) :
  2433     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2434   bool doHeapRegion(HeapRegion* r) {
  2435     if (r->claim_value() != _claim_value) {
  2436       gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  2437                              "claim value = %d, should be %d",
  2438                              r->bottom(), r->end(), r->claim_value(),
  2439                              _claim_value);
  2440       ++_failures;
  2442     if (!r->isHumongous()) {
  2443       _sh_region = NULL;
  2444     } else if (r->startsHumongous()) {
  2445       _sh_region = r;
  2446     } else if (r->continuesHumongous()) {
  2447       if (r->humongous_start_region() != _sh_region) {
  2448         gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  2449                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2450                                r->bottom(), r->end(),
  2451                                r->humongous_start_region(),
  2452                                _sh_region);
  2453         ++_failures;
  2456     return false;
  2458   size_t failures() {
  2459     return _failures;
  2461 };
  2463 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2464   CheckClaimValuesClosure cl(claim_value);
  2465   heap_region_iterate(&cl);
  2466   return cl.failures() == 0;
  2468 #endif // ASSERT
  2470 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2471   HeapRegion* r = g1_policy()->collection_set();
  2472   while (r != NULL) {
  2473     HeapRegion* next = r->next_in_collection_set();
  2474     if (cl->doHeapRegion(r)) {
  2475       cl->incomplete();
  2476       return;
  2478     r = next;
  2482 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2483                                                   HeapRegionClosure *cl) {
  2484   if (r == NULL) {
  2485     // The CSet is empty so there's nothing to do.
  2486     return;
  2489   assert(r->in_collection_set(),
  2490          "Start region must be a member of the collection set.");
  2491   HeapRegion* cur = r;
  2492   while (cur != NULL) {
  2493     HeapRegion* next = cur->next_in_collection_set();
  2494     if (cl->doHeapRegion(cur) && false) {
  2495       cl->incomplete();
  2496       return;
  2498     cur = next;
  2500   cur = g1_policy()->collection_set();
  2501   while (cur != r) {
  2502     HeapRegion* next = cur->next_in_collection_set();
  2503     if (cl->doHeapRegion(cur) && false) {
  2504       cl->incomplete();
  2505       return;
  2507     cur = next;
  2511 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2512   return _hrs->length() > 0 ? _hrs->at(0) : NULL;
  2516 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2517   Space* res = heap_region_containing(addr);
  2518   if (res == NULL)
  2519     res = perm_gen()->space_containing(addr);
  2520   return res;
  2523 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2524   Space* sp = space_containing(addr);
  2525   if (sp != NULL) {
  2526     return sp->block_start(addr);
  2528   return NULL;
  2531 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2532   Space* sp = space_containing(addr);
  2533   assert(sp != NULL, "block_size of address outside of heap");
  2534   return sp->block_size(addr);
  2537 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2538   Space* sp = space_containing(addr);
  2539   return sp->block_is_obj(addr);
  2542 bool G1CollectedHeap::supports_tlab_allocation() const {
  2543   return true;
  2546 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2547   return HeapRegion::GrainBytes;
  2550 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2551   // Return the remaining space in the cur alloc region, but not less than
  2552   // the min TLAB size.
  2554   // Also, this value can be at most the humongous object threshold,
  2555   // since we can't allow TLABs to grow big enough to accommodate
  2556   // humongous objects.
  2558   HeapRegion* hr = _mutator_alloc_region.get();
  2559   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  2560   if (hr == NULL) {
  2561     return max_tlab_size;
  2562   } else {
  2563     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
  2567 size_t G1CollectedHeap::large_typearray_limit() {
  2568   // FIXME
  2569   return HeapRegion::GrainBytes/HeapWordSize;
  2572 size_t G1CollectedHeap::max_capacity() const {
  2573   return _g1_reserved.byte_size();
  2576 jlong G1CollectedHeap::millis_since_last_gc() {
  2577   // assert(false, "NYI");
  2578   return 0;
  2581 void G1CollectedHeap::prepare_for_verify() {
  2582   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2583     ensure_parsability(false);
  2585   g1_rem_set()->prepare_for_verify();
  2588 class VerifyLivenessOopClosure: public OopClosure {
  2589   G1CollectedHeap* g1h;
  2590 public:
  2591   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
  2592     g1h = _g1h;
  2594   void do_oop(narrowOop *p) { do_oop_work(p); }
  2595   void do_oop(      oop *p) { do_oop_work(p); }
  2597   template <class T> void do_oop_work(T *p) {
  2598     oop obj = oopDesc::load_decode_heap_oop(p);
  2599     guarantee(obj == NULL || !g1h->is_obj_dead(obj),
  2600               "Dead object referenced by a not dead object");
  2602 };
  2604 class VerifyObjsInRegionClosure: public ObjectClosure {
  2605 private:
  2606   G1CollectedHeap* _g1h;
  2607   size_t _live_bytes;
  2608   HeapRegion *_hr;
  2609   bool _use_prev_marking;
  2610 public:
  2611   // use_prev_marking == true  -> use "prev" marking information,
  2612   // use_prev_marking == false -> use "next" marking information
  2613   VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
  2614     : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
  2615     _g1h = G1CollectedHeap::heap();
  2617   void do_object(oop o) {
  2618     VerifyLivenessOopClosure isLive(_g1h);
  2619     assert(o != NULL, "Huh?");
  2620     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
  2621       o->oop_iterate(&isLive);
  2622       if (!_hr->obj_allocated_since_prev_marking(o)) {
  2623         size_t obj_size = o->size();    // Make sure we don't overflow
  2624         _live_bytes += (obj_size * HeapWordSize);
  2628   size_t live_bytes() { return _live_bytes; }
  2629 };
  2631 class PrintObjsInRegionClosure : public ObjectClosure {
  2632   HeapRegion *_hr;
  2633   G1CollectedHeap *_g1;
  2634 public:
  2635   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  2636     _g1 = G1CollectedHeap::heap();
  2637   };
  2639   void do_object(oop o) {
  2640     if (o != NULL) {
  2641       HeapWord *start = (HeapWord *) o;
  2642       size_t word_sz = o->size();
  2643       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  2644                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  2645                           (void*) o, word_sz,
  2646                           _g1->isMarkedPrev(o),
  2647                           _g1->isMarkedNext(o),
  2648                           _hr->obj_allocated_since_prev_marking(o));
  2649       HeapWord *end = start + word_sz;
  2650       HeapWord *cur;
  2651       int *val;
  2652       for (cur = start; cur < end; cur++) {
  2653         val = (int *) cur;
  2654         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  2658 };
  2660 class VerifyRegionClosure: public HeapRegionClosure {
  2661 private:
  2662   bool _allow_dirty;
  2663   bool _par;
  2664   bool _use_prev_marking;
  2665   bool _failures;
  2666 public:
  2667   // use_prev_marking == true  -> use "prev" marking information,
  2668   // use_prev_marking == false -> use "next" marking information
  2669   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
  2670     : _allow_dirty(allow_dirty),
  2671       _par(par),
  2672       _use_prev_marking(use_prev_marking),
  2673       _failures(false) {}
  2675   bool failures() {
  2676     return _failures;
  2679   bool doHeapRegion(HeapRegion* r) {
  2680     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  2681               "Should be unclaimed at verify points.");
  2682     if (!r->continuesHumongous()) {
  2683       bool failures = false;
  2684       r->verify(_allow_dirty, _use_prev_marking, &failures);
  2685       if (failures) {
  2686         _failures = true;
  2687       } else {
  2688         VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
  2689         r->object_iterate(&not_dead_yet_cl);
  2690         if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  2691           gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  2692                                  "max_live_bytes "SIZE_FORMAT" "
  2693                                  "< calculated "SIZE_FORMAT,
  2694                                  r->bottom(), r->end(),
  2695                                  r->max_live_bytes(),
  2696                                  not_dead_yet_cl.live_bytes());
  2697           _failures = true;
  2701     return false; // continue the iteration even if we hit a failure, so all failures are reported
  2703 };
  2705 class VerifyRootsClosure: public OopsInGenClosure {
  2706 private:
  2707   G1CollectedHeap* _g1h;
  2708   bool             _use_prev_marking;
  2709   bool             _failures;
  2710 public:
  2711   // use_prev_marking == true  -> use "prev" marking information,
  2712   // use_prev_marking == false -> use "next" marking information
  2713   VerifyRootsClosure(bool use_prev_marking) :
  2714     _g1h(G1CollectedHeap::heap()),
  2715     _use_prev_marking(use_prev_marking),
  2716     _failures(false) { }
  2718   bool failures() { return _failures; }
  2720   template <class T> void do_oop_nv(T* p) {
  2721     T heap_oop = oopDesc::load_heap_oop(p);
  2722     if (!oopDesc::is_null(heap_oop)) {
  2723       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2724       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
  2725         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  2726                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  2727         obj->print_on(gclog_or_tty);
  2728         _failures = true;
  2733   void do_oop(oop* p)       { do_oop_nv(p); }
  2734   void do_oop(narrowOop* p) { do_oop_nv(p); }
  2735 };
  2737 // This is the task used for parallel heap verification.
  2739 class G1ParVerifyTask: public AbstractGangTask {
  2740 private:
  2741   G1CollectedHeap* _g1h;
  2742   bool _allow_dirty;
  2743   bool _use_prev_marking;
  2744   bool _failures;
  2746 public:
  2747   // use_prev_marking == true  -> use "prev" marking information,
  2748   // use_prev_marking == false -> use "next" marking information
  2749   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
  2750                   bool use_prev_marking) :
  2751     AbstractGangTask("Parallel verify task"),
  2752     _g1h(g1h),
  2753     _allow_dirty(allow_dirty),
  2754     _use_prev_marking(use_prev_marking),
  2755     _failures(false) { }
  2757   bool failures() {
  2758     return _failures;
  2761   void work(int worker_i) {
  2762     HandleMark hm;
  2763     VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
  2764     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
  2765                                           HeapRegion::ParVerifyClaimValue);
  2766     if (blk.failures()) {
  2767       _failures = true;
  2770 };
  2772 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  2773   verify(allow_dirty, silent, /* use_prev_marking */ true);
  2776 void G1CollectedHeap::verify(bool allow_dirty,
  2777                              bool silent,
  2778                              bool use_prev_marking) {
  2779   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2780     if (!silent) { gclog_or_tty->print("roots "); }
  2781     VerifyRootsClosure rootsCl(use_prev_marking);
  2782     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
  2783     process_strong_roots(true,  // activate StrongRootsScope
  2784                          false,
  2785                          SharedHeap::SO_AllClasses,
  2786                          &rootsCl,
  2787                          &blobsCl,
  2788                          &rootsCl);
  2789     bool failures = rootsCl.failures();
  2790     rem_set()->invalidate(perm_gen()->used_region(), false);
  2791     if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
  2792     verify_region_sets();
  2793     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  2794     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2795       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2796              "sanity check");
  2798       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2799       int n_workers = workers()->total_workers();
  2800       set_par_threads(n_workers);
  2801       workers()->run_task(&task);
  2802       set_par_threads(0);
  2803       if (task.failures()) {
  2804         failures = true;
  2807       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  2808              "sanity check");
  2810       reset_heap_region_claim_values();
  2812       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2813              "sanity check");
  2814     } else {
  2815       VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
  2816       _hrs->iterate(&blk);
  2817       if (blk.failures()) {
  2818         failures = true;
  2821     if (!silent) gclog_or_tty->print("RemSet ");
  2822     rem_set()->verify();
  2824     if (failures) {
  2825       gclog_or_tty->print_cr("Heap:");
  2826       print_on(gclog_or_tty, true /* extended */);
  2827       gclog_or_tty->print_cr("");
  2828 #ifndef PRODUCT
  2829       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  2830         concurrent_mark()->print_reachable("at-verification-failure",
  2831                                            use_prev_marking, false /* all */);
  2833 #endif
  2834       gclog_or_tty->flush();
  2836     guarantee(!failures, "there should not have been any failures");
  2837   } else {
  2838     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  2842 class PrintRegionClosure: public HeapRegionClosure {
  2843   outputStream* _st;
  2844 public:
  2845   PrintRegionClosure(outputStream* st) : _st(st) {}
  2846   bool doHeapRegion(HeapRegion* r) {
  2847     r->print_on(_st);
  2848     return false;
  2850 };
  2852 void G1CollectedHeap::print() const { print_on(tty); }
  2854 void G1CollectedHeap::print_on(outputStream* st) const {
  2855   print_on(st, PrintHeapAtGCExtended);
  2858 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
  2859   st->print(" %-20s", "garbage-first heap");
  2860   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  2861             capacity()/K, used_unlocked()/K);
  2862   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  2863             _g1_storage.low_boundary(),
  2864             _g1_storage.high(),
  2865             _g1_storage.high_boundary());
  2866   st->cr();
  2867   st->print("  region size " SIZE_FORMAT "K, ",
  2868             HeapRegion::GrainBytes/K);
  2869   size_t young_regions = _young_list->length();
  2870   st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  2871             young_regions, young_regions * HeapRegion::GrainBytes / K);
  2872   size_t survivor_regions = g1_policy()->recorded_survivor_regions();
  2873   st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
  2874             survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
  2875   st->cr();
  2876   perm()->as_gen()->print_on(st);
  2877   if (extended) {
  2878     st->cr();
  2879     print_on_extended(st);
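       // For illustration only, the two G1-specific lines assembled above come
       // out roughly as (numbers and addresses made up):
       //
       //  garbage-first heap   total 1048576K, used 21504K [0x..., 0x..., 0x...)
       //   region size 1024K, 12 young (12288K), 2 survivors (2048K)
       //
       // followed by the perm gen's own print_on() output and, when 'extended'
       // is set, one line per heap region from print_on_extended().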
  2883 void G1CollectedHeap::print_on_extended(outputStream* st) const {
  2884   PrintRegionClosure blk(st);
  2885   _hrs->iterate(&blk);
  2888 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  2889   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2890     workers()->print_worker_threads_on(st);
  2892   _cmThread->print_on(st);
  2893   st->cr();
  2894   _cm->print_worker_threads_on(st);
  2895   _cg1r->print_worker_threads_on(st);
  2896   st->cr();
  2899 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  2900   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2901     workers()->threads_do(tc);
  2903   tc->do_thread(_cmThread);
  2904   _cg1r->threads_do(tc);
  2907 void G1CollectedHeap::print_tracing_info() const {
  2908   // We'll overload this to mean "trace GC pause statistics."
  2909   if (TraceGen0Time || TraceGen1Time) {
  2910     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  2911     // to that.
  2912     g1_policy()->print_tracing_info();
  2914   if (G1SummarizeRSetStats) {
  2915     g1_rem_set()->print_summary_info();
  2917   if (G1SummarizeConcMark) {
  2918     concurrent_mark()->print_summary_info();
  2920   g1_policy()->print_yg_surv_rate_info();
  2921   SpecializationStats::print();
  2924 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  2925   HeapRegion* hr = heap_region_containing(addr);
  2926   if (hr == NULL) {
  2927     return 0;
  2928   } else {
  2929     return 1;
  2933 G1CollectedHeap* G1CollectedHeap::heap() {
  2934   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  2935          "not a garbage-first heap");
  2936   return _g1h;
  2939 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  2940   // always_do_update_barrier = false;
  2941   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  2942   // Call allocation profiler
  2943   AllocationProfiler::iterate_since_last_gc();
  2944   // Fill TLAB's and such
  2945   ensure_parsability(true);
  2948 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  2949   // FIXME: what is this about?
  2950   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  2951   // is set.
  2952   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  2953                         "derived pointer present"));
  2954   // always_do_update_barrier = true;
  2957 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  2958                                                unsigned int gc_count_before,
  2959                                                bool* succeeded) {
  2960   assert_heap_not_locked_and_not_at_safepoint();
  2961   g1_policy()->record_stop_world_start();
  2962   VM_G1IncCollectionPause op(gc_count_before,
  2963                              word_size,
  2964                              false, /* should_initiate_conc_mark */
  2965                              g1_policy()->max_pause_time_ms(),
  2966                              GCCause::_g1_inc_collection_pause);
  2967   VMThread::execute(&op);
  2969   HeapWord* result = op.result();
  2970   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  2971   assert(result == NULL || ret_succeeded,
  2972          "the result should be NULL if the VM did not succeed");
  2973   *succeeded = ret_succeeded;
  2975   assert_heap_not_locked();
  2976   return result;
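       // Note: the pause itself is carried out by VM_G1IncCollectionPause on the
       // VM thread, at a safepoint, which is what ends up calling
       // do_collection_pause_at_safepoint() further down. Callers typically
       // capture gc_count_before under the Heap_lock and treat a false
       // *succeeded (e.g. the GC locker was active, or another GC ran first) as
       // "no pause happened, re-read the count and retry" rather than as an
       // allocation failure.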
  2979 void
  2980 G1CollectedHeap::doConcurrentMark() {
  2981   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2982   if (!_cmThread->in_progress()) {
  2983     _cmThread->set_started();
  2984     CGC_lock->notify();
  2988 class VerifyMarkedObjsClosure: public ObjectClosure {
  2989     G1CollectedHeap* _g1h;
  2990     public:
  2991     VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  2992     void do_object(oop obj) {
  2993       assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
  2994              "mark-and-sweep mark should agree with concurrent deadness");
  2996 };
  2998 void
  2999 G1CollectedHeap::checkConcurrentMark() {
  3000     VerifyMarkedObjsClosure verifycl(this);
  3001     //    MutexLockerEx x(getMarkBitMapLock(),
  3002     //              Mutex::_no_safepoint_check_flag);
  3003     object_iterate(&verifycl, false);
  3006 void G1CollectedHeap::do_sync_mark() {
  3007   _cm->checkpointRootsInitial();
  3008   _cm->markFromRoots();
  3009   _cm->checkpointRootsFinal(false);
  3012 // <NEW PREDICTION>
  3014 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
  3015                                                        bool young) {
  3016   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
  3019 void G1CollectedHeap::check_if_region_is_too_expensive(double
  3020                                                            predicted_time_ms) {
  3021   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
  3024 size_t G1CollectedHeap::pending_card_num() {
  3025   size_t extra_cards = 0;
  3026   JavaThread *curr = Threads::first();
  3027   while (curr != NULL) {
  3028     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3029     extra_cards += dcq.size();
  3030     curr = curr->next();
  3032   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3033   size_t buffer_size = dcqs.buffer_size();
  3034   size_t buffer_num = dcqs.completed_buffers_num();
  3035   return buffer_size * buffer_num + extra_cards;
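       // Worked example of the arithmetic: with buffer_size() == 256,
       // completed_buffers_num() == 10 and 37 entries still pending in the
       // per-thread queues, this returns 256 * 10 + 37 = 2597.
       // max_pending_card_num() below is the pessimistic variant of the same
       // calculation: it assumes each thread's local buffer could also be full.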
  3038 size_t G1CollectedHeap::max_pending_card_num() {
  3039   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3040   size_t buffer_size = dcqs.buffer_size();
  3041   size_t buffer_num  = dcqs.completed_buffers_num();
  3042   int thread_num  = Threads::number_of_threads();
  3043   return (buffer_num + thread_num) * buffer_size;
  3046 size_t G1CollectedHeap::cards_scanned() {
  3047   return g1_rem_set()->cardsScanned();
  3050 void
  3051 G1CollectedHeap::setup_surviving_young_words() {
  3052   guarantee( _surviving_young_words == NULL, "pre-condition" );
  3053   size_t array_length = g1_policy()->young_cset_length();
  3054   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  3055   if (_surviving_young_words == NULL) {
  3056     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  3057                           "Not enough space for young surv words summary.");
  3059   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  3060 #ifdef ASSERT
  3061   for (size_t i = 0;  i < array_length; ++i) {
  3062     assert( _surviving_young_words[i] == 0, "memset above" );
  3064 #endif // ASSERT
  3067 void
  3068 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3069   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3070   size_t array_length = g1_policy()->young_cset_length();
  3071   for (size_t i = 0; i < array_length; ++i)
  3072     _surviving_young_words[i] += surv_young_words[i];
  3075 void
  3076 G1CollectedHeap::cleanup_surviving_young_words() {
  3077   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3078   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
  3079   _surviving_young_words = NULL;
  3082 // </NEW PREDICTION>
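       // The global _surviving_young_words array set up above is the reduction
       // target for the per-worker copies kept in G1ParScanThreadState (see its
       // constructor further down): each worker tallies surviving words per
       // collection-set region locally, and the tallies are folded in through
       // update_surviving_young_words() under ParGCRareEvent_lock.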
  3084 struct PrepareForRSScanningClosure : public HeapRegionClosure {
  3085   bool doHeapRegion(HeapRegion *r) {
  3086     r->rem_set()->set_iter_claimed(0);
  3087     return false;
  3089 };
  3091 #if TASKQUEUE_STATS
  3092 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3093   st->print_raw_cr("GC Task Stats");
  3094   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3095   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3098 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3099   print_taskqueue_stats_hdr(st);
  3101   TaskQueueStats totals;
  3102   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3103   for (int i = 0; i < n; ++i) {
  3104     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3105     totals += task_queue(i)->stats;
  3107   st->print_raw("tot "); totals.print(st); st->cr();
  3109   DEBUG_ONLY(totals.verify());
  3112 void G1CollectedHeap::reset_taskqueue_stats() {
  3113   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3114   for (int i = 0; i < n; ++i) {
  3115     task_queue(i)->stats.reset();
  3118 #endif // TASKQUEUE_STATS
  3120 bool
  3121 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3122   assert_at_safepoint(true /* should_be_vm_thread */);
  3123   guarantee(!is_gc_active(), "collection is not reentrant");
  3125   if (GC_locker::check_active_before_gc()) {
  3126     return false;
  3129   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3130   ResourceMark rm;
  3132   if (PrintHeapAtGC) {
  3133     Universe::print_heap_before_gc();
  3136   verify_region_sets_optional();
  3137   verify_dirty_young_regions();
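         // Rough shape of the pause that follows: decide whether this is an
         // initial-mark pause, choose the collection set, evacuate it, free the
         // collected regions, rebuild the young / survivor lists and, if an
         // initial mark was requested, hand the baton to the concurrent marking
         // thread just before the safepoint is released.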
  3140     // This call will decide whether this pause is an initial-mark
  3141     // pause. If it is, during_initial_mark_pause() will return true
  3142     // for the duration of this pause.
  3143     g1_policy()->decide_on_conc_mark_initiation();
  3145     char verbose_str[128];
  3146     sprintf(verbose_str, "GC pause ");
  3147     if (g1_policy()->in_young_gc_mode()) {
  3148       if (g1_policy()->full_young_gcs())
  3149         strcat(verbose_str, "(young)");
  3150       else
  3151         strcat(verbose_str, "(partial)");
  3153     if (g1_policy()->during_initial_mark_pause()) {
  3154       strcat(verbose_str, " (initial-mark)");
  3155       // We are about to start a marking cycle, so we increment the
  3156       // full collection counter.
  3157       increment_total_full_collections();
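           // Depending on the flags above, verbose_str therefore reads e.g.
           // "GC pause (young)", "GC pause (partial)", or either of those with
           // " (initial-mark)" appended.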
  3160     // if PrintGCDetails is on, we'll print long statistics information
  3161     // in the collector policy code, so let's not print this as the output
  3162     // is messy if we do.
  3163     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  3164     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3165     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  3167     TraceMemoryManagerStats tms(false /* fullGC */);
  3169     // If the secondary_free_list is not empty, append it to the
  3170     // free_list. No need to wait for the cleanup operation to finish;
  3171     // the region allocation code will check the secondary_free_list
  3172     // and wait if necessary. If the G1StressConcRegionFreeing flag is
  3173     // set, skip this step so that the region allocation code has to
  3174     // get entries from the secondary_free_list.
  3175     if (!G1StressConcRegionFreeing) {
  3176       append_secondary_free_list_if_not_empty_with_lock();
  3179     increment_gc_time_stamp();
  3181     if (g1_policy()->in_young_gc_mode()) {
  3182       assert(check_young_list_well_formed(),
  3183              "young list should be well formed");
  3186     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3187       IsGCActiveMark x;
  3189       gc_prologue(false);
  3190       increment_total_collections(false /* full gc */);
  3192 #if G1_REM_SET_LOGGING
  3193       gclog_or_tty->print_cr("\nJust chose CS, heap:");
  3194       print();
  3195 #endif
  3197       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  3198         HandleMark hm;  // Discard invalid handles created during verification
  3199         gclog_or_tty->print(" VerifyBeforeGC:");
  3200         prepare_for_verify();
  3201         Universe::verify(false);
  3204       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3206       // Please see comment in G1CollectedHeap::ref_processing_init()
  3207       // to see how reference processing currently works in G1.
  3208       //
  3209       // We want to turn off ref discovery, if necessary, and turn it back
  3210       // on again later if we do. XXX Dubious: why is discovery disabled?
  3211       bool was_enabled = ref_processor()->discovery_enabled();
  3212       if (was_enabled) ref_processor()->disable_discovery();
  3214       // Forget the current alloc region (we might even choose it to be part
  3215       // of the collection set!).
  3216       release_mutator_alloc_region();
  3218       // The start time recorded below deliberately excludes the time
  3219       // spent in the (possible) verification above.
  3220       double start_time_sec = os::elapsedTime();
  3221       size_t start_used_bytes = used();
  3223 #if YOUNG_LIST_VERBOSE
  3224       gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  3225       _young_list->print();
  3226       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3227 #endif // YOUNG_LIST_VERBOSE
  3229       g1_policy()->record_collection_pause_start(start_time_sec,
  3230                                                  start_used_bytes);
  3232 #if YOUNG_LIST_VERBOSE
  3233       gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  3234       _young_list->print();
  3235 #endif // YOUNG_LIST_VERBOSE
  3237       if (g1_policy()->during_initial_mark_pause()) {
  3238         concurrent_mark()->checkpointRootsInitialPre();
  3240       save_marks();
  3242       // We must do this before any possible evacuation that should propagate
  3243       // marks.
  3244       if (mark_in_progress()) {
  3245         double start_time_sec = os::elapsedTime();
  3247         _cm->drainAllSATBBuffers();
  3248         double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  3249         g1_policy()->record_satb_drain_time(finish_mark_ms);
  3251       // Record the number of elements currently on the mark stack, so we
  3252       // only iterate over these.  (Since evacuation may add to the mark
  3253       // stack, doing more exposes race conditions.)  If no mark is in
  3254       // progress, this will be zero.
  3255       _cm->set_oops_do_bound();
  3257       if (mark_in_progress())
  3258         concurrent_mark()->newCSet();
  3260 #if YOUNG_LIST_VERBOSE
  3261       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  3262       _young_list->print();
  3263       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3264 #endif // YOUNG_LIST_VERBOSE
  3266       g1_policy()->choose_collection_set(target_pause_time_ms);
  3268       // Nothing to do if we were unable to choose a collection set.
  3269 #if G1_REM_SET_LOGGING
  3270       gclog_or_tty->print_cr("\nAfter pause, heap:");
  3271       print();
  3272 #endif
  3273       PrepareForRSScanningClosure prepare_for_rs_scan;
  3274       collection_set_iterate(&prepare_for_rs_scan);
  3276       setup_surviving_young_words();
  3278       // Set up the gc allocation regions.
  3279       get_gc_alloc_regions();
  3281       // Actually do the work...
  3282       evacuate_collection_set();
  3284       free_collection_set(g1_policy()->collection_set());
  3285       g1_policy()->clear_collection_set();
  3287       cleanup_surviving_young_words();
  3289       // Start a new incremental collection set for the next pause.
  3290       g1_policy()->start_incremental_cset_building();
  3292       // Clear the _cset_fast_test bitmap in anticipation of adding
  3293       // regions to the incremental collection set for the next
  3294       // evacuation pause.
  3295       clear_cset_fast_test();
  3297       if (g1_policy()->in_young_gc_mode()) {
  3298         _young_list->reset_sampled_info();
  3300         // Don't check the whole heap at this point as the
  3301         // GC alloc regions from this pause have been tagged
  3302         // as survivors and moved on to the survivor list.
  3303         // Survivor regions will fail the !is_young() check.
  3304         assert(check_young_list_empty(false /* check_heap */),
  3305                "young list should be empty");
  3307 #if YOUNG_LIST_VERBOSE
  3308         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  3309         _young_list->print();
  3310 #endif // YOUNG_LIST_VERBOSE
  3312         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  3313                                           _young_list->first_survivor_region(),
  3314                                           _young_list->last_survivor_region());
  3316         _young_list->reset_auxilary_lists();
  3319       if (evacuation_failed()) {
  3320         _summary_bytes_used = recalculate_used();
  3321       } else {
  3322         // The "used" of the collection set regions has already been subtracted
  3323         // when they were freed.  Add in the bytes evacuated.
  3324         _summary_bytes_used += g1_policy()->bytes_in_to_space();
  3327       if (g1_policy()->in_young_gc_mode() &&
  3328           g1_policy()->during_initial_mark_pause()) {
  3329         concurrent_mark()->checkpointRootsInitialPost();
  3330         set_marking_started();
  3331         // CAUTION: after the doConcurrentMark() call below,
  3332         // the concurrent marking thread(s) could be running
  3333         // concurrently with us. Make sure that anything after
  3334         // this point does not assume that we are the only GC thread
  3335         // running. Note: of course, the actual marking work will
  3336         // not start until the safepoint itself is released in
  3337         // ConcurrentGCThread::safepoint_desynchronize().
  3338         doConcurrentMark();
  3341 #if YOUNG_LIST_VERBOSE
  3342       gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  3343       _young_list->print();
  3344       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3345 #endif // YOUNG_LIST_VERBOSE
  3347       init_mutator_alloc_region();
  3349       double end_time_sec = os::elapsedTime();
  3350       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  3351       g1_policy()->record_pause_time_ms(pause_time_ms);
  3352       g1_policy()->record_collection_pause_end();
  3354       MemoryService::track_memory_usage();
  3356       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  3357         HandleMark hm;  // Discard invalid handles created during verification
  3358         gclog_or_tty->print(" VerifyAfterGC:");
  3359         prepare_for_verify();
  3360         Universe::verify(false);
  3363       if (was_enabled) ref_processor()->enable_discovery();
  3366         size_t expand_bytes = g1_policy()->expansion_amount();
  3367         if (expand_bytes > 0) {
  3368           size_t bytes_before = capacity();
  3369           if (!expand(expand_bytes)) {
  3370             // We failed to expand the heap so let's verify that
  3371             // committed/uncommitted amount match the backing store
  3372             assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
  3373             assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
  3378       if (mark_in_progress()) {
  3379         concurrent_mark()->update_g1_committed();
  3382 #ifdef TRACESPINNING
  3383       ParallelTaskTerminator::print_termination_counts();
  3384 #endif
  3386       gc_epilogue(false);
  3389     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  3390       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  3391       print_tracing_info();
  3392       vm_exit(-1);
  3396   verify_region_sets_optional();
  3398   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  3399   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  3401   if (PrintHeapAtGC) {
  3402     Universe::print_heap_after_gc();
  3404   if (G1SummarizeRSetStats &&
  3405       (G1SummarizeRSetStatsPeriod > 0) &&
  3406       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  3407     g1_rem_set()->print_summary_info();
  3410   return true;
  3413 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  3415   size_t gclab_word_size;
  3416   switch (purpose) {
  3417     case GCAllocForSurvived:
  3418       gclab_word_size = YoungPLABSize;
  3419       break;
  3420     case GCAllocForTenured:
  3421       gclab_word_size = OldPLABSize;
  3422       break;
  3423     default:
  3424       assert(false, "unknown GCAllocPurpose");
  3425       gclab_word_size = OldPLABSize;
  3426       break;
  3428   return gclab_word_size;
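       // For example, at the usual defaults (YoungPLABSize = 4096 and
       // OldPLABSize = 1024, both in HeapWords) this gives 32K survivor PLABs
       // and 8K tenured PLABs on a 64-bit VM; the numbers are illustrative
       // since both flags are tunable.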
  3431 void G1CollectedHeap::init_mutator_alloc_region() {
  3432   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  3433   _mutator_alloc_region.init();
  3436 void G1CollectedHeap::release_mutator_alloc_region() {
  3437   _mutator_alloc_region.release();
  3438   assert(_mutator_alloc_region.get() == NULL, "post-condition");
  3441 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  3442   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  3443   // make sure we don't call set_gc_alloc_region() multiple times on
  3444   // the same region
  3445   assert(r == NULL || !r->is_gc_alloc_region(),
  3446          "shouldn't already be a GC alloc region");
  3447   assert(r == NULL || !r->isHumongous(),
  3448          "humongous regions shouldn't be used as GC alloc regions");
  3450   HeapWord* original_top = NULL;
  3451   if (r != NULL)
  3452     original_top = r->top();
  3454   // We will want to record the used space in r as being there before gc.
  3455   // Once we install it as a GC alloc region, it's eligible for allocation.
  3456   // So record it now and use it later.
  3457   size_t r_used = 0;
  3458   if (r != NULL) {
  3459     r_used = r->used();
  3461     if (G1CollectedHeap::use_parallel_gc_threads()) {
  3462       // need to take the lock to guard against two threads calling
  3463       // get_gc_alloc_region concurrently (very unlikely but...)
  3464       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3465       r->save_marks();
  3468   HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  3469   _gc_alloc_regions[purpose] = r;
  3470   if (old_alloc_region != NULL) {
  3471     // Replace aliases too.
  3472     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3473       if (_gc_alloc_regions[ap] == old_alloc_region) {
  3474         _gc_alloc_regions[ap] = r;
  3478   if (r != NULL) {
  3479     push_gc_alloc_region(r);
  3480     if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
  3481       // We are using a region as a GC alloc region after it has been used
  3482       // as a mutator allocation region during the current marking cycle.
  3483       // The mutator-allocated objects are currently implicitly marked, but
  3484       // when we move hr->next_top_at_mark_start() forward at the end
  3485       // of the GC pause, they won't be.  We therefore mark all objects in
  3486       // the "gap".  We do this object-by-object, since marking densely
  3487       // does not currently work right with marking bitmap iteration.  This
  3488       // means we rely on TLAB filling at the start of pauses, and no
  3489       // "resuscitation" of filled TLAB's.  If we want to do this, we need
  3490       // to fix the marking bitmap iteration.
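             // Pictorially, with NTAMS = next_top_at_mark_start():
             //
             //   bottom       NTAMS             original_top        end
             //     |------------|*******************|----------------|
             //
             // The starred range holds the mutator-allocated objects that would
             // lose their implicit liveness once NTAMS is moved up, so they are
             // walked and marked explicitly below.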
  3491       HeapWord* curhw = r->next_top_at_mark_start();
  3492       HeapWord* t = original_top;
  3494       while (curhw < t) {
  3495         oop cur = (oop)curhw;
  3496         // We'll assume parallel for generality.  This is rare code.
  3497         concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
  3498         curhw = curhw + cur->size();
  3500       assert(curhw == t, "Should have parsed correctly.");
  3502     if (G1PolicyVerbose > 1) {
  3503       gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
  3504                           "for survivors:", r->bottom(), original_top, r->end());
  3505       r->print();
  3507     g1_policy()->record_before_bytes(r_used);
  3511 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  3512   assert(Thread::current()->is_VM_thread() ||
  3513          FreeList_lock->owned_by_self(), "Precondition");
  3514   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  3515          "Precondition.");
  3516   hr->set_is_gc_alloc_region(true);
  3517   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  3518   _gc_alloc_region_list = hr;
  3521 #ifdef G1_DEBUG
  3522 class FindGCAllocRegion: public HeapRegionClosure {
  3523 public:
  3524   bool doHeapRegion(HeapRegion* r) {
  3525     if (r->is_gc_alloc_region()) {
  3526       gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
  3527                              r->hrs_index(), r->bottom());
  3529     return false;
  3531 };
  3532 #endif // G1_DEBUG
  3534 void G1CollectedHeap::forget_alloc_region_list() {
  3535   assert_at_safepoint(true /* should_be_vm_thread */);
  3536   while (_gc_alloc_region_list != NULL) {
  3537     HeapRegion* r = _gc_alloc_region_list;
  3538     assert(r->is_gc_alloc_region(), "Invariant.");
  3539     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  3540     // newly allocated data in order to be able to apply deferred updates
  3541     // before the GC is done for verification purposes (i.e to allow
  3542     // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
  3543     // collection.
  3544     r->ContiguousSpace::set_saved_mark();
  3545     _gc_alloc_region_list = r->next_gc_alloc_region();
  3546     r->set_next_gc_alloc_region(NULL);
  3547     r->set_is_gc_alloc_region(false);
  3548     if (r->is_survivor()) {
  3549       if (r->is_empty()) {
  3550         r->set_not_young();
  3551       } else {
  3552         _young_list->add_survivor_region(r);
  3556 #ifdef G1_DEBUG
  3557   FindGCAllocRegion fa;
  3558   heap_region_iterate(&fa);
  3559 #endif // G1_DEBUG
  3563 bool G1CollectedHeap::check_gc_alloc_regions() {
  3564   // TODO: allocation regions check
  3565   return true;
  3568 void G1CollectedHeap::get_gc_alloc_regions() {
  3569   // First, let's check that the GC alloc region list is empty (it should be)
  3570   assert(_gc_alloc_region_list == NULL, "invariant");
  3572   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3573     assert(_gc_alloc_regions[ap] == NULL, "invariant");
  3574     assert(_gc_alloc_region_counts[ap] == 0, "invariant");
  3576     // Create new GC alloc regions.
  3577     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
  3578     _retained_gc_alloc_regions[ap] = NULL;
  3580     if (alloc_region != NULL) {
  3581       assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
  3583       // let's make sure that the GC alloc region is not tagged as such
  3584       // outside a GC operation
  3585       assert(!alloc_region->is_gc_alloc_region(), "sanity");
  3587       if (alloc_region->in_collection_set() ||
  3588           alloc_region->top() == alloc_region->end() ||
  3589           alloc_region->top() == alloc_region->bottom() ||
  3590           alloc_region->isHumongous()) {
  3591         // we will discard the current GC alloc region if
  3592         // * it's in the collection set (it can happen!),
  3593         // * it's already full (no point in using it),
  3594         // * it's empty (this means that it was emptied during
  3595         // a cleanup and it should be on the free list now), or
  3596         // * it's humongous (this means that it was emptied
  3597         // during a cleanup and was added to the free list, but
  3598       // has been subsequently used to allocate a humongous
  3599         // object that may be less than the region size).
  3601         alloc_region = NULL;
  3605     if (alloc_region == NULL) {
  3606       // we will get a new GC alloc region
  3607       alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
  3608     } else {
  3609       // the region was retained from the last collection
  3610       ++_gc_alloc_region_counts[ap];
  3611       if (G1PrintHeapRegions) {
  3612         gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
  3613                                "top "PTR_FORMAT,
  3614                                alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
  3618     if (alloc_region != NULL) {
  3619       assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
  3620       set_gc_alloc_region(ap, alloc_region);
  3623     assert(_gc_alloc_regions[ap] == NULL ||
  3624            _gc_alloc_regions[ap]->is_gc_alloc_region(),
  3625            "the GC alloc region should be tagged as such");
  3626     assert(_gc_alloc_regions[ap] == NULL ||
  3627            _gc_alloc_regions[ap] == _gc_alloc_region_list,
  3628            "the GC alloc region should be the same as the GC alloc list head");
  3630   // Set alternative regions for allocation purposes that have reached
  3631   // their limit.
  3632   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3633     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
  3634     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
  3635       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
  3638   assert(check_gc_alloc_regions(), "alloc regions messed up");
  3641 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  3642   // We keep a separate list of all regions that have been alloc regions in
  3643   // the current collection pause. Forget that now. This method will
  3644   // untag the GC alloc regions and tear down the GC alloc region
  3645   // list. It's desirable that no regions are tagged as GC alloc
  3646   // outside GCs.
  3648   forget_alloc_region_list();
  3650   // The current alloc regions contain objs that have survived
  3651   // collection. Make them no longer GC alloc regions.
  3652   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3653     HeapRegion* r = _gc_alloc_regions[ap];
  3654     _retained_gc_alloc_regions[ap] = NULL;
  3655     _gc_alloc_region_counts[ap] = 0;
  3657     if (r != NULL) {
  3658       // we retain nothing on _gc_alloc_regions between GCs
  3659       set_gc_alloc_region(ap, NULL);
  3661       if (r->is_empty()) {
  3662         // We didn't actually allocate anything in it; let's just put
  3663         // it back on the free list.
  3664         _free_list.add_as_head(r);
  3665       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3666         // retain it so that we can use it at the beginning of the next GC
  3667         _retained_gc_alloc_regions[ap] = r;
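         // In short, for each purpose: an empty region goes back on the free
         // list, a retainable region is kept in _retained_gc_alloc_regions for
         // the next pause (unless 'totally' is set), and anything else simply
         // stops being a GC alloc region while keeping its surviving objects.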
  3673 #ifndef PRODUCT
  3674 // Useful for debugging
  3676 void G1CollectedHeap::print_gc_alloc_regions() {
  3677   gclog_or_tty->print_cr("GC alloc regions");
  3678   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3679     HeapRegion* r = _gc_alloc_regions[ap];
  3680     if (r == NULL) {
  3681       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
  3682     } else {
  3683       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
  3684                              ap, r->bottom(), r->used());
  3688 #endif // PRODUCT
  3690 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  3691   _drain_in_progress = false;
  3692   set_evac_failure_closure(cl);
  3693   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3696 void G1CollectedHeap::finalize_for_evac_failure() {
  3697   assert(_evac_failure_scan_stack != NULL &&
  3698          _evac_failure_scan_stack->length() == 0,
  3699          "Postcondition");
  3700   assert(!_drain_in_progress, "Postcondition");
  3701   delete _evac_failure_scan_stack;
  3702   _evac_failure_scan_stack = NULL;
  3707 // *** Sequential G1 Evacuation
  3709 class G1IsAliveClosure: public BoolObjectClosure {
  3710   G1CollectedHeap* _g1;
  3711 public:
  3712   G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3713   void do_object(oop p) { assert(false, "Do not call."); }
  3714   bool do_object_b(oop p) {
  3715     // It is reachable if it is outside the collection set, or is inside
  3716     // and forwarded.
  3718 #ifdef G1_DEBUG
  3719     gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
  3720                            (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
  3721                            !_g1->obj_in_cs(p) || p->is_forwarded());
  3722 #endif // G1_DEBUG
  3724     return !_g1->obj_in_cs(p) || p->is_forwarded();
  3726 };
  3728 class G1KeepAliveClosure: public OopClosure {
  3729   G1CollectedHeap* _g1;
  3730 public:
  3731   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3732   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  3733   void do_oop(      oop* p) {
  3734     oop obj = *p;
  3735 #ifdef G1_DEBUG
  3736     if (PrintGC && Verbose) {
  3737       gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
  3738                              p, (void*) obj, (void*) *p);
  3740 #endif // G1_DEBUG
  3742     if (_g1->obj_in_cs(obj)) {
  3743       assert( obj->is_forwarded(), "invariant" );
  3744       *p = obj->forwardee();
  3745 #ifdef G1_DEBUG
  3746       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
  3747                              (void*) obj, (void*) *p);
  3748 #endif // G1_DEBUG
  3751 };
  3753 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  3754 private:
  3755   G1CollectedHeap* _g1;
  3756   DirtyCardQueue *_dcq;
  3757   CardTableModRefBS* _ct_bs;
  3759 public:
  3760   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
  3761     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
  3763   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3764   virtual void do_oop(      oop* p) { do_oop_work(p); }
  3765   template <class T> void do_oop_work(T* p) {
  3766     assert(_from->is_in_reserved(p), "paranoia");
  3767     if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
  3768         !_from->is_survivor()) {
  3769       size_t card_index = _ct_bs->index_for(p);
  3770       if (_ct_bs->mark_card_deferred(card_index)) {
  3771         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
  3775 };
  3777 class RemoveSelfPointerClosure: public ObjectClosure {
  3778 private:
  3779   G1CollectedHeap* _g1;
  3780   ConcurrentMark* _cm;
  3781   HeapRegion* _hr;
  3782   size_t _prev_marked_bytes;
  3783   size_t _next_marked_bytes;
  3784   OopsInHeapRegionClosure *_cl;
  3785 public:
  3786   RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
  3787                            OopsInHeapRegionClosure* cl) :
  3788     _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
  3789     _next_marked_bytes(0), _cl(cl) {}
  3791   size_t prev_marked_bytes() { return _prev_marked_bytes; }
  3792   size_t next_marked_bytes() { return _next_marked_bytes; }
  3794   // <original comment>
  3795   // The original idea here was to coalesce evacuated and dead objects.
  3796   // However that caused complications with the block offset table (BOT).
  3797   // In particular if there were two TLABs, one of them partially refined.
  3798   // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  3799   // The BOT entries of the unrefined part of TLAB_2 point to the start
  3800   // of TLAB_2. If the last object of TLAB_1 and the first object
  3801   // of TLAB_2 are coalesced, then the cards of the unrefined part
  3802   // would point into middle of the filler object.
  3803   // The current approach is to not coalesce and leave the BOT contents intact.
  3804   // </original comment>
  3805   //
  3806   // We now reset the BOT when we start the object iteration over the
  3807   // region and refine its entries for every object we come across. So
  3808   // the above comment is not really relevant and we should be able
  3809   // to coalesce dead objects if we want to.
  3810   void do_object(oop obj) {
  3811     HeapWord* obj_addr = (HeapWord*) obj;
  3812     assert(_hr->is_in(obj_addr), "sanity");
  3813     size_t obj_size = obj->size();
  3814     _hr->update_bot_for_object(obj_addr, obj_size);
  3815     if (obj->is_forwarded() && obj->forwardee() == obj) {
  3816       // The object failed to move.
  3817       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
  3818       _cm->markPrev(obj);
  3819       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3820       _prev_marked_bytes += (obj_size * HeapWordSize);
  3821       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
  3822         _cm->markAndGrayObjectIfNecessary(obj);
  3824       obj->set_mark(markOopDesc::prototype());
  3825       // While we were processing RSet buffers during the
  3826       // collection, we actually didn't scan any cards on the
  3827       // collection set, since we didn't want to update remembered
  3828       // sets with entries that point into the collection set, given
  3829       // that live objects from the collection set are about to move
  3830       // and such entries will be stale very soon. This change also
  3831       // dealt with a reliability issue which involved scanning a
  3832       // card in the collection set and coming across an array that
  3833       // was being chunked and looking malformed. The problem is
  3834       // that, if evacuation fails, we might have remembered set
  3835       // entries missing given that we skipped cards on the
  3836       // collection set. So, we'll recreate such entries now.
  3837       obj->oop_iterate(_cl);
  3838       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3839     } else {
  3840       // The object has been either evacuated or is dead. Fill it with a
  3841       // dummy object.
  3842       MemRegion mr((HeapWord*)obj, obj_size);
  3843       CollectedHeap::fill_with_object(mr);
  3844       _cm->clearRangeBothMaps(mr);
  3847 };
  3849 void G1CollectedHeap::remove_self_forwarding_pointers() {
  3850   UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
  3851   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  3852   UpdateRSetDeferred deferred_update(_g1h, &dcq);
  3853   OopsInHeapRegionClosure *cl;
  3854   if (G1DeferredRSUpdate) {
  3855     cl = &deferred_update;
  3856   } else {
  3857     cl = &immediate_update;
  3859   HeapRegion* cur = g1_policy()->collection_set();
  3860   while (cur != NULL) {
  3861     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3862     assert(!cur->isHumongous(), "sanity");
  3864     if (cur->evacuation_failed()) {
  3865       assert(cur->in_collection_set(), "bad CS");
  3866       RemoveSelfPointerClosure rspc(_g1h, cur, cl);
  3868       cur->reset_bot();
  3869       cl->set_region(cur);
  3870       cur->object_iterate(&rspc);
  3872       // A number of manipulations to make the TAMS be the current top,
  3873       // and the marked bytes be the ones observed in the iteration.
  3874       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
  3875         // The comments below are the postconditions achieved by the
  3876         // calls.  Note especially the last such condition, which says that
  3877         // the count of marked bytes has been properly restored.
  3878         cur->note_start_of_marking(false);
  3879         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3880         cur->add_to_marked_bytes(rspc.prev_marked_bytes());
  3881         // _next_marked_bytes == prev_marked_bytes.
  3882         cur->note_end_of_marking();
  3883         // _prev_top_at_mark_start == top(),
  3884         // _prev_marked_bytes == prev_marked_bytes
  3886       // If there is no mark in progress, we modified the _next variables
  3887       // above needlessly, but harmlessly.
  3888       if (_g1h->mark_in_progress()) {
  3889         cur->note_start_of_marking(false);
  3890         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3891         // _next_marked_bytes == next_marked_bytes.
  3894       // Now make sure the region has the right index in the sorted array.
  3895       g1_policy()->note_change_in_marked_bytes(cur);
  3897     cur = cur->next_in_collection_set();
  3899   assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3901   // Now restore saved marks, if any.
  3902   if (_objs_with_preserved_marks != NULL) {
  3903     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  3904     guarantee(_objs_with_preserved_marks->length() ==
  3905               _preserved_marks_of_objs->length(), "Both or none.");
  3906     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  3907       oop obj   = _objs_with_preserved_marks->at(i);
  3908       markOop m = _preserved_marks_of_objs->at(i);
  3909       obj->set_mark(m);
  3911     // Delete the preserved marks growable arrays (allocated on the C heap).
  3912     delete _objs_with_preserved_marks;
  3913     delete _preserved_marks_of_objs;
  3914     _objs_with_preserved_marks = NULL;
  3915     _preserved_marks_of_objs = NULL;
  3919 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  3920   _evac_failure_scan_stack->push(obj);
  3923 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  3924   assert(_evac_failure_scan_stack != NULL, "precondition");
  3926   while (_evac_failure_scan_stack->length() > 0) {
  3927      oop obj = _evac_failure_scan_stack->pop();
  3928      _evac_failure_closure->set_region(heap_region_containing(obj));
  3929      obj->oop_iterate_backwards(_evac_failure_closure);
  3933 oop
  3934 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  3935                                                oop old) {
  3936   markOop m = old->mark();
  3937   oop forward_ptr = old->forward_to_atomic(old);
  3938   if (forward_ptr == NULL) {
  3939     // Forward-to-self succeeded.
  3940     if (_evac_failure_closure != cl) {
  3941       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  3942       assert(!_drain_in_progress,
  3943              "Should only be true while someone holds the lock.");
  3944       // Set the global evac-failure closure to the current thread's.
  3945       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  3946       set_evac_failure_closure(cl);
  3947       // Now do the common part.
  3948       handle_evacuation_failure_common(old, m);
  3949       // Reset to NULL.
  3950       set_evac_failure_closure(NULL);
  3951     } else {
  3952       // The lock is already held, and this is recursive.
  3953       assert(_drain_in_progress, "This should only be the recursive case.");
  3954       handle_evacuation_failure_common(old, m);
  3956     return old;
  3957   } else {
  3958     // Someone else had a place to copy it.
  3959     return forward_ptr;
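         // A note on the protocol: forward_to_atomic(old) attempts to CAS a
         // self-forwarding pointer into old's mark word. NULL means this thread
         // installed it and owns the failure handling above; a non-NULL result
         // is whatever forwardee another worker installed first, either a real
         // copy or 'old' itself if that worker also failed to evacuate it.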
  3963 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  3964   set_evacuation_failed(true);
  3966   preserve_mark_if_necessary(old, m);
  3968   HeapRegion* r = heap_region_containing(old);
  3969   if (!r->evacuation_failed()) {
  3970     r->set_evacuation_failed(true);
  3971     if (G1PrintHeapRegions) {
  3972       gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
  3973                           "["PTR_FORMAT","PTR_FORMAT")\n",
  3974                           r, r->bottom(), r->end());
  3978   push_on_evac_failure_scan_stack(old);
  3980   if (!_drain_in_progress) {
  3981     // prevent recursion in copy_to_survivor_space()
  3982     _drain_in_progress = true;
  3983     drain_evac_failure_scan_stack();
  3984     _drain_in_progress = false;
  3988 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  3989   assert(evacuation_failed(), "Oversaving!");
  3990   // We want to call the "for_promotion_failure" version only in the
  3991   // case of a promotion failure.
  3992   if (m->must_be_preserved_for_promotion_failure(obj)) {
  3993     if (_objs_with_preserved_marks == NULL) {
  3994       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  3995       _objs_with_preserved_marks =
  3996         new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3997       _preserved_marks_of_objs =
  3998         new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
  4000     _objs_with_preserved_marks->push(obj);
  4001     _preserved_marks_of_objs->push(m);
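         // Roughly speaking, must_be_preserved_for_promotion_failure() only
         // holds for "interesting" headers (an installed identity hash, locking
         // or biasing state), so these arrays stay small; objects carrying the
         // default prototype header simply get it re-installed later.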
  4005 // *** Parallel G1 Evacuation
  4007 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4008                                                   size_t word_size) {
  4009   assert(!isHumongous(word_size),
  4010          err_msg("we should not be seeing humongous allocation requests "
  4011                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4013   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  4014   // let the caller handle alloc failure
  4015   if (alloc_region == NULL) return NULL;
  4017   HeapWord* block = alloc_region->par_allocate(word_size);
  4018   if (block == NULL) {
  4019     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  4021   return block;
  4024 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  4025                                             bool par) {
  4026   // Another thread might have obtained alloc_region for the given
  4027   // purpose, and might be attempting to allocate in it, and might
  4028   // succeed.  Therefore, we can't do the "finalization" stuff on the
  4029   // region below until we're sure the last allocation has happened.
  4030   // We ensure this by allocating the remaining space with a garbage
  4031   // object.
  4032   if (par) par_allocate_remaining_space(alloc_region);
  4033   // Now we can do the post-GC stuff on the region.
  4034   alloc_region->note_end_of_copying();
  4035   g1_policy()->record_after_bytes(alloc_region->used());
  4038 HeapWord*
  4039 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  4040                                          HeapRegion*    alloc_region,
  4041                                          bool           par,
  4042                                          size_t         word_size) {
  4043   assert(!isHumongous(word_size),
  4044          err_msg("we should not be seeing humongous allocation requests "
  4045                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4047   // We need to make sure we serialize calls to this method. Given
  4048   // that the FreeList_lock guards accesses to the free_list anyway,
  4049   // and we need to potentially remove a region from it, we'll use it
  4050   // to protect the whole call.
  4051   MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  4053   HeapWord* block = NULL;
  4054   // In the parallel case, a thread that obtained the lock before us may
  4055   // have already installed a new gc_alloc_region.
  4056   if (alloc_region != _gc_alloc_regions[purpose]) {
  4057     assert(par, "But should only happen in parallel case.");
  4058     alloc_region = _gc_alloc_regions[purpose];
  4059     if (alloc_region == NULL) return NULL;
  4060     block = alloc_region->par_allocate(word_size);
  4061     if (block != NULL) return block;
  4062     // Otherwise, continue; the newly-installed region is also too full for this request.
  4064   assert(alloc_region != NULL, "We better have an allocation region");
  4065   retire_alloc_region(alloc_region, par);
  4067   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  4068     // Cannot allocate more regions for the given purpose.
  4069     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
  4070     // Is there an alternative?
  4071     if (purpose != alt_purpose) {
  4072       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
  4073       // Is the alternative region distinct (i.e. not already aliased to this one)?
  4074       if (alloc_region != alt_region && alt_region != NULL) {
  4075         // Try to allocate in the alternative region.
  4076         if (par) {
  4077           block = alt_region->par_allocate(word_size);
  4078         } else {
  4079           block = alt_region->allocate(word_size);
  4081         // Make an alias.
  4082         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  4083         if (block != NULL) {
  4084           return block;
  4086         retire_alloc_region(alt_region, par);
  4088       // Both the allocation region and the alternative one are full
  4089       // and aliased, replace them with a new allocation region.
  4090       purpose = alt_purpose;
  4091     } else {
  4092       set_gc_alloc_region(purpose, NULL);
  4093       return NULL;
  4097   // Now allocate a new region for allocation.
  4098   alloc_region = new_gc_alloc_region(purpose, word_size);
  4100   // let the caller handle alloc failure
  4101   if (alloc_region != NULL) {
  4103     assert(check_gc_alloc_regions(), "alloc regions messed up");
  4104     assert(alloc_region->saved_mark_at_top(),
  4105            "Mark should have been saved already.");
  4106     // This must be done last: once it's installed, other threads may
  4107     // allocate in it (without holding the lock.)
  4108     set_gc_alloc_region(purpose, alloc_region);
  4110     if (par) {
  4111       block = alloc_region->par_allocate(word_size);
  4112     } else {
  4113       block = alloc_region->allocate(word_size);
  4115     // Caller handles alloc failure.
  4116   } else {
  4117     // This also NULLs out any other purposes aliased to the same old alloc region.
  4118     set_gc_alloc_region(purpose, NULL);
  4120   return block;  // May be NULL.
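         // Slow-path recap: re-check the shared region under FreeList_lock
         // (another thread may have refreshed it), retire the full region, fall
         // back to the alternative purpose's region (aliasing it) if this
         // purpose has hit its region limit, and only then install a brand new
         // GC alloc region. A NULL result simply tells the caller that this
         // purpose is out of space for now.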
  4123 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
  4124   HeapWord* block = NULL;
  4125   size_t free_words;
  4126   do {
  4127     free_words = r->free()/HeapWordSize;
  4128     // If there's too little space, no one can allocate, so we're done.
  4129     if (free_words < CollectedHeap::min_fill_size()) return;
  4130     // Otherwise, try to claim it.
  4131     block = r->par_allocate(free_words);
  4132   } while (block == NULL);
  4133   fill_with_object(block, free_words);
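         // Filling the unclaimed tail with a dummy object keeps the region
         // parsable: anything that later walks the region from bottom() to
         // top() only ever sees well-formed object headers, never an
         // unformatted gap.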
  4136 #ifndef PRODUCT
  4137 bool GCLabBitMapClosure::do_bit(size_t offset) {
  4138   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
  4139   guarantee(_cm->isMarked(oop(addr)), "it should be!");
  4140   return true;
  4142 #endif // PRODUCT
  4144 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
  4145   : _g1h(g1h),
  4146     _refs(g1h->task_queue(queue_num)),
  4147     _dcq(&g1h->dirty_card_queue_set()),
  4148     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  4149     _g1_rem(g1h->g1_rem_set()),
  4150     _hash_seed(17), _queue_num(queue_num),
  4151     _term_attempts(0),
  4152     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  4153     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
  4154     _age_table(false),
  4155     _strong_roots_time(0), _term_time(0),
  4156     _alloc_buffer_waste(0), _undo_waste(0)
  4157 {
  4158   // we allocate G1YoungSurvRateNumRegions plus one entries, since
  4159   // we "sacrifice" entry 0 to keep track of surviving bytes for
  4160   // non-young regions (where the age is -1)
  4161   // We also add a few elements at the beginning and at the end in
  4162   // an attempt to eliminate cache contention
  4163   size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
  4164   size_t array_length = PADDING_ELEM_NUM +
  4165                         real_length +
  4166                         PADDING_ELEM_NUM;
  4167   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  4168   if (_surviving_young_words_base == NULL)
  4169     vm_exit_out_of_memory(array_length * sizeof(size_t),
  4170                           "Not enough space for young surv histo.");
  4171   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  4172   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  4174   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  4175   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  4177   _start = os::elapsedTime();
  4178 }
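The PADDING_ELEM_NUM slack added before and after the surviving-young-words array is there so that this thread's counters do not share a cache line with whatever the allocator places next to them. The same idea in a stand-alone form, assuming a 64-byte cache line (PaddedCounters is a hypothetical name):

#include <cstddef>
#include <vector>

constexpr std::size_t kCacheLineBytes = 64;
constexpr std::size_t kPadElems = kCacheLineBytes / sizeof(std::size_t);

// Per-thread counter array with padding elements before and after the
// payload, so frequent writes by this thread cannot false-share with
// neighbouring allocations.
class PaddedCounters {
public:
  explicit PaddedCounters(std::size_t n)
    : _storage(kPadElems + n + kPadElems, 0),
      _base(_storage.data() + kPadElems),
      _n(n) {}

  std::size_t& operator[](std::size_t i) { return _base[i]; }
  std::size_t size() const { return _n; }

private:
  std::vector<std::size_t> _storage;  // [pad | payload | pad]
  std::size_t* _base;                 // points at payload element 0
  std::size_t _n;
};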
  4180 void
  4181 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  4182 {
  4183   st->print_raw_cr("GC Termination Stats");
  4184   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  4185                    " ------waste (KiB)------");
  4186   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  4187                    "  total   alloc    undo");
  4188   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  4189                    " ------- ------- -------");
  4190 }
  4192 void
  4193 G1ParScanThreadState::print_termination_stats(int i,
  4194                                               outputStream* const st) const
  4195 {
  4196   const double elapsed_ms = elapsed_time() * 1000.0;
  4197   const double s_roots_ms = strong_roots_time() * 1000.0;
  4198   const double term_ms    = term_time() * 1000.0;
  4199   st->print_cr("%3d %9.2f %9.2f %6.2f "
  4200                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
  4201                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
  4202                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
  4203                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
  4204                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
  4205                alloc_buffer_waste() * HeapWordSize / K,
  4206                undo_waste() * HeapWordSize / K);
  4207 }
  4209 #ifdef ASSERT
  4210 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  4211   assert(ref != NULL, "invariant");
  4212   assert(UseCompressedOops, "sanity");
  4213   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  4214   oop p = oopDesc::load_decode_heap_oop(ref);
  4215   assert(_g1h->is_in_g1_reserved(p),
  4216          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4217   return true;
  4218 }
  4220 bool G1ParScanThreadState::verify_ref(oop* ref) const {
  4221   assert(ref != NULL, "invariant");
  4222   if (has_partial_array_mask(ref)) {
  4223     // Must be in the collection set--it's already been copied.
  4224     oop p = clear_partial_array_mask(ref);
  4225     assert(_g1h->obj_in_cs(p),
  4226            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4227   } else {
  4228     oop p = oopDesc::load_decode_heap_oop(ref);
  4229     assert(_g1h->is_in_g1_reserved(p),
  4230            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4231   }
  4232   return true;
  4233 }
  4235 bool G1ParScanThreadState::verify_task(StarTask ref) const {
  4236   if (ref.is_narrow()) {
  4237     return verify_ref((narrowOop*) ref);
  4238   } else {
  4239     return verify_ref((oop*) ref);
  4240   }
  4241 }
  4242 #endif // ASSERT
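verify_ref() relies on queue entries being tagged pointers: a "partial array" task is an oop* with a mask folded into its low bits, which has_partial_array_mask() tests and clear_partial_array_mask() strips. A minimal sketch of that kind of low-bit tagging (tag_partial, is_partial and untag_partial are hypothetical helpers; it assumes the tagged pointers are at least 2-byte aligned):

#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kPartialMask = 0x1;  // low bit marks a partial-array task

template <typename T>
T* tag_partial(T* p) {
  assert((reinterpret_cast<std::uintptr_t>(p) & kPartialMask) == 0 && "pointer must be aligned");
  return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(p) | kPartialMask);
}

template <typename T>
bool is_partial(T* p) {
  return (reinterpret_cast<std::uintptr_t>(p) & kPartialMask) != 0;
}

template <typename T>
T* untag_partial(T* p) {
  return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(p) & ~kPartialMask);
}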
  4244 void G1ParScanThreadState::trim_queue() {
  4245   StarTask ref;
  4246   do {
  4247     // Drain the overflow stack first, so other threads can steal.
  4248     while (refs()->pop_overflow(ref)) {
  4249       deal_with_reference(ref);
  4250     }
  4251     while (refs()->pop_local(ref)) {
  4252       deal_with_reference(ref);
  4253     }
  4254   } while (!refs()->is_empty());
  4255 }
  4257 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  4258   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  4259   _par_scan_state(par_scan_state) { }
  4261 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
  4262   // This is called _after_ do_oop_work has been called, hence after
  4263   // the object has been relocated to its new location and *p points
  4264   // to its new location.
  4266   T heap_oop = oopDesc::load_heap_oop(p);
  4267   if (!oopDesc::is_null(heap_oop)) {
  4268     oop obj = oopDesc::decode_heap_oop(heap_oop);
  4269     assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
  4270            "shouldn't still be in the CSet if evacuation didn't fail.");
  4271     HeapWord* addr = (HeapWord*)obj;
  4272     if (_g1->is_in_g1_reserved(addr))
  4273       _cm->grayRoot(oop(addr));
  4274   }
  4275 }
  4277 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
  4278   size_t    word_sz = old->size();
  4279   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  4280   // +1 to make the -1 indexes valid...
  4281   int       young_index = from_region->young_index_in_cset()+1;
  4282   assert( (from_region->is_young() && young_index > 0) ||
  4283           (!from_region->is_young() && young_index == 0), "invariant" );
  4284   G1CollectorPolicy* g1p = _g1->g1_policy();
  4285   markOop m = old->mark();
  4286   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4287                                            : m->age();
  4288   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4289                                                              word_sz);
  4290   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  4291   oop       obj     = oop(obj_ptr);
  4293   if (obj_ptr == NULL) {
  4294     // This will either forward-to-self, or detect that someone else has
  4295     // installed a forwarding pointer.
  4296     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4297     return _g1->handle_evacuation_failure_par(cl, old);
  4298   }
  4300   // We're going to allocate linearly, so might as well prefetch ahead.
  4301   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  4303   oop forward_ptr = old->forward_to_atomic(obj);
  4304   if (forward_ptr == NULL) {
  4305     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4306     if (g1p->track_object_age(alloc_purpose)) {
  4307       // We could simply do obj->incr_age(). However, this causes a
  4308       // performance issue. obj->incr_age() will first check whether
  4309       // the object has a displaced mark by checking its mark word;
  4310       // getting the mark word from the new location of the object
  4311       // stalls. So, given that we already have the mark word and we
  4312       // are about to install it anyway, it's better to increase the
  4313       // age on the mark word, when the object does not have a
  4314       // displaced mark word. We're not expecting many objects to have
  4315       // a displaced mark word, so that case is not optimized
  4316       // further (it could be...) and we simply call obj->incr_age().
  4318       if (m->has_displaced_mark_helper()) {
  4319         // in this case, we have to install the mark word first,
  4320         // otherwise obj looks to be forwarded (the old mark word,
  4321         // which contains the forward pointer, was copied)
  4322         obj->set_mark(m);
  4323         obj->incr_age();
  4324       } else {
  4325         m = m->incr_age();
  4326         obj->set_mark(m);
  4327       }
  4328       _par_scan_state->age_table()->add(obj, word_sz);
  4329     } else {
  4330       obj->set_mark(m);
  4331     }
  4333     // preserve "next" mark bit
  4334     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  4335       if (!use_local_bitmaps ||
  4336           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
  4337         // if we couldn't mark it on the local bitmap (this happens when
  4338         // the object was not allocated in the GCLab), we have to bite
  4339         // the bullet and do the standard parallel mark
  4340         _cm->markAndGrayObjectIfNecessary(obj);
  4341       }
  4342 #if 1
  4343       if (_g1->isMarkedNext(old)) {
  4344         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
  4345       }
  4346 #endif
  4347     }
  4349     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  4350     surv_young_words[young_index] += word_sz;
  4352     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4353       arrayOop(old)->set_length(0);
  4354       oop* old_p = set_partial_array_mask(old);
  4355       _par_scan_state->push_on_queue(old_p);
  4356     } else {
  4357       // No point in using the slower heap_region_containing() method,
  4358       // given that we know obj is in the heap.
  4359       _scanner->set_region(_g1->heap_region_containing_raw(obj));
  4360       obj->oop_iterate_backwards(_scanner);
  4361     }
  4362   } else {
  4363     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4364     obj = forward_ptr;
  4365   }
  4366   return obj;
  4367 }
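copy_to_survivor_space() lets every worker allocate space and then race on forward_to_atomic(): the CAS winner fills in its copy, the losers undo their allocation and adopt the winner's forwardee. A stripped-down sketch of that claim protocol over std::atomic (Object and claim_and_forward are hypothetical; the real code also deals with mark words, ages and evacuation failure):

#include <atomic>
#include <cstddef>
#include <cstring>
#include <new>

// Header of a hypothetical heap object: a forwarding slot that stays null
// until some worker claims the object, followed by `size` payload bytes.
struct Object {
  std::atomic<Object*> forwardee{nullptr};
  std::size_t          size{0};
};

Object* claim_and_forward(Object* old_obj) {
  std::size_t bytes = sizeof(Object) + old_obj->size;
  void* space = ::operator new(bytes);            // speculative allocation
  Object* copy = new (space) Object{};
  copy->size = old_obj->size;

  Object* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, copy)) {
    // We won the race: fill in the payload of our copy.
    std::memcpy(copy + 1, old_obj + 1, old_obj->size);
    return copy;
  }

  // Somebody installed a forwardee first: undo our allocation, use theirs.
  copy->~Object();
  ::operator delete(space);
  return expected;
}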
  4369 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
  4370 template <class T>
  4371 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
  4372 ::do_oop_work(T* p) {
  4373   oop obj = oopDesc::load_decode_heap_oop(p);
  4374   assert(barrier != G1BarrierRS || obj != NULL,
  4375          "Precondition: G1BarrierRS implies obj is nonNull");
  4377   // here the null check is implicit in the cset_fast_test() test
  4378   if (_g1->in_cset_fast_test(obj)) {
  4379 #if G1_REM_SET_LOGGING
  4380     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
  4381                            "into CS.", p, (void*) obj);
  4382 #endif
  4383     if (obj->is_forwarded()) {
  4384       oopDesc::encode_store_heap_oop(p, obj->forwardee());
  4385     } else {
  4386       oop copy_oop = copy_to_survivor_space(obj);
  4387       oopDesc::encode_store_heap_oop(p, copy_oop);
  4388     }
  4389     // When scanning the RS, we only care about objs in CS.
  4390     if (barrier == G1BarrierRS) {
  4391       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4392     }
  4393   }
  4395   if (barrier == G1BarrierEvac && obj != NULL) {
  4396     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4397   }
  4399   if (do_gen_barrier && obj != NULL) {
  4400     par_do_barrier(p);
  4401   }
  4402 }
  4404 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
  4405 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  4407 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  4408   assert(has_partial_array_mask(p), "invariant");
  4409   oop old = clear_partial_array_mask(p);
  4410   assert(old->is_objArray(), "must be obj array");
  4411   assert(old->is_forwarded(), "must be forwarded");
  4412   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  4414   objArrayOop obj = objArrayOop(old->forwardee());
  4415   assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  4416   // Process ParGCArrayScanChunk elements now
  4417   // and push the remainder back onto queue
  4418   int start     = arrayOop(old)->length();
  4419   int end       = obj->length();
  4420   int remainder = end - start;
  4421   assert(start <= end, "just checking");
  4422   if (remainder > 2 * ParGCArrayScanChunk) {
  4423     // Test above combines last partial chunk with a full chunk
  4424     end = start + ParGCArrayScanChunk;
  4425     arrayOop(old)->set_length(end);
  4426     // Push remainder.
  4427     oop* old_p = set_partial_array_mask(old);
  4428     assert(arrayOop(old)->length() < obj->length(), "Empty push?");
  4429     _par_scan_state->push_on_queue(old_p);
  4430   } else {
  4431     // Restore length so that the heap remains parsable in
  4432     // case of evacuation failure.
  4433     arrayOop(old)->set_length(end);
  4434   }
  4435   _scanner.set_region(_g1->heap_region_containing_raw(obj));
  4436   // process our set of indices (include header in first chunk)
  4437   obj->oop_iterate_range(&_scanner, start, end);
  4438 }
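The partial-array closure chunks large object arrays: it scans at most ParGCArrayScanChunk elements per task, temporarily keeps the "processed so far" cursor in the old copy's length field, and pushes a follow-up task for the rest (the last partial chunk is folded into a full one). The same control flow in a self-contained form (ArrayTask, process_chunk and kChunk are hypothetical):

#include <cstddef>
#include <deque>
#include <vector>

constexpr std::size_t kChunk = 512;          // elements processed per task

struct ArrayTask {
  std::vector<int>* array;                   // the array being scanned
  std::size_t       done;                    // elements already processed
};

void visit(int /*element*/) { /* scan one element */ }

// Process one chunk of `task`; if a lot is left, push a follow-up task so
// another worker (or a later iteration) can pick up the remainder.
void process_chunk(ArrayTask task, std::deque<ArrayTask>& queue) {
  std::size_t end = task.array->size();
  if (end - task.done > 2 * kChunk) {
    end = task.done + kChunk;                // leave the rest for another task
    queue.push_back(ArrayTask{task.array, end});
  }
  for (std::size_t i = task.done; i < end; ++i) {
    visit((*task.array)[i]);
  }
}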
  4440 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4441 protected:
  4442   G1CollectedHeap*              _g1h;
  4443   G1ParScanThreadState*         _par_scan_state;
  4444   RefToScanQueueSet*            _queues;
  4445   ParallelTaskTerminator*       _terminator;
  4447   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4448   RefToScanQueueSet*      queues()         { return _queues; }
  4449   ParallelTaskTerminator* terminator()     { return _terminator; }
  4451 public:
  4452   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4453                                 G1ParScanThreadState* par_scan_state,
  4454                                 RefToScanQueueSet* queues,
  4455                                 ParallelTaskTerminator* terminator)
  4456     : _g1h(g1h), _par_scan_state(par_scan_state),
  4457       _queues(queues), _terminator(terminator) {}
  4459   void do_void();
  4461 private:
  4462   inline bool offer_termination();
  4463 };
  4465 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4466   G1ParScanThreadState* const pss = par_scan_state();
  4467   pss->start_term_time();
  4468   const bool res = terminator()->offer_termination();
  4469   pss->end_term_time();
  4470   return res;
  4471 }
  4473 void G1ParEvacuateFollowersClosure::do_void() {
  4474   StarTask stolen_task;
  4475   G1ParScanThreadState* const pss = par_scan_state();
  4476   pss->trim_queue();
  4478   do {
  4479     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  4480       assert(pss->verify_task(stolen_task), "sanity");
  4481       if (stolen_task.is_narrow()) {
  4482         pss->deal_with_reference((narrowOop*) stolen_task);
  4483       } else {
  4484         pss->deal_with_reference((oop*) stolen_task);
  4485       }
  4487       // We've just processed a reference and we might have made
  4488       // available new entries on the queues. So we have to make sure
  4489       // we drain the queues as necessary.
  4490       pss->trim_queue();
  4491     }
  4492   } while (!offer_termination());
  4494   pss->retire_alloc_buffers();
  4495 }
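do_void() is the standard work-stealing driver: drain the local queue, steal from peers, and only give up once the termination protocol agrees everyone is out of work. A toy version with mutex-protected deques and an atomic count of outstanding tasks standing in for ParallelTaskTerminator (WorkerQueues, try_steal and evacuate_followers are hypothetical):

#include <atomic>
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

struct Task { int payload; };

// One mutex-protected deque per worker plus a global count of tasks that have
// been pushed but not yet finished; crude, but enough to show the shape.
struct WorkerQueues {
  std::vector<std::deque<Task>> queues;
  std::vector<std::mutex>       locks;
  std::atomic<long>             outstanding{0};

  explicit WorkerQueues(std::size_t n) : queues(n), locks(n) {}

  void push(std::size_t w, Task t) {
    std::lock_guard<std::mutex> g(locks[w]);
    queues[w].push_back(t);
    outstanding.fetch_add(1);
  }

  bool pop_local(std::size_t w, Task& out) {
    std::lock_guard<std::mutex> g(locks[w]);
    if (queues[w].empty()) return false;
    out = queues[w].back();
    queues[w].pop_back();
    return true;
  }

  bool try_steal(std::size_t thief, Task& out) {
    for (std::size_t v = 0; v < queues.size(); ++v) {
      if (v == thief) continue;
      std::lock_guard<std::mutex> g(locks[v]);
      if (queues[v].empty()) continue;
      out = queues[v].front();   // steal from the opposite end of the victim's deque
      queues[v].pop_front();
      return true;
    }
    return false;
  }
};

// Scanning a task may push children via wq.push() before the task is counted done.
inline void process(Task /*t*/, std::size_t /*worker*/, WorkerQueues& /*wq*/) {}

// Drain-and-steal loop for one worker; it stops once no task is outstanding anywhere.
void evacuate_followers(std::size_t worker, WorkerQueues& wq) {
  Task t{};
  while (wq.outstanding.load() > 0) {
    if (wq.pop_local(worker, t) || wq.try_steal(worker, t)) {
      process(t, worker, wq);
      wq.outstanding.fetch_sub(1);
    }
  }
}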
  4497 class G1ParTask : public AbstractGangTask {
  4498 protected:
  4499   G1CollectedHeap*       _g1h;
  4500   RefToScanQueueSet      *_queues;
  4501   ParallelTaskTerminator _terminator;
  4502   int _n_workers;
  4504   Mutex _stats_lock;
  4505   Mutex* stats_lock() { return &_stats_lock; }
  4507   size_t getNCards() {
  4508     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4509       / G1BlockOffsetSharedArray::N_bytes;
  4510   }
  4512 public:
  4513   G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
  4514     : AbstractGangTask("G1 collection"),
  4515       _g1h(g1h),
  4516       _queues(task_queues),
  4517       _terminator(workers, _queues),
  4518       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
  4519       _n_workers(workers)
  4520   {}
  4522   RefToScanQueueSet* queues() { return _queues; }
  4524   RefToScanQueue *work_queue(int i) {
  4525     return queues()->queue(i);
  4526   }
  4528   void work(int i) {
  4529     if (i >= _n_workers) return;  // no work needed this round
  4531     double start_time_ms = os::elapsedTime() * 1000.0;
  4532     _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
  4534     ResourceMark rm;
  4535     HandleMark   hm;
  4537     G1ParScanThreadState            pss(_g1h, i);
  4538     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  4539     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  4540     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  4542     pss.set_evac_closure(&scan_evac_cl);
  4543     pss.set_evac_failure_closure(&evac_failure_cl);
  4544     pss.set_partial_scan_closure(&partial_scan_cl);
  4546     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  4547     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  4548     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  4549     G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  4551     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  4552     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  4553     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  4555     OopsInHeapRegionClosure        *scan_root_cl;
  4556     OopsInHeapRegionClosure        *scan_perm_cl;
  4558     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  4559       scan_root_cl = &scan_mark_root_cl;
  4560       scan_perm_cl = &scan_mark_perm_cl;
  4561     } else {
  4562       scan_root_cl = &only_scan_root_cl;
  4563       scan_perm_cl = &only_scan_perm_cl;
  4564     }
  4566     pss.start_strong_roots();
  4567     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  4568                                   SharedHeap::SO_AllClasses,
  4569                                   scan_root_cl,
  4570                                   &push_heap_rs_cl,
  4571                                   scan_perm_cl,
  4572                                   i);
  4573     pss.end_strong_roots();
  4574     {
  4575       double start = os::elapsedTime();
  4576       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4577       evac.do_void();
  4578       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4579       double term_ms = pss.term_time()*1000.0;
  4580       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  4581       _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
  4582     }
  4583     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4584     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  4586     // Clean up any par-expanded rem sets.
  4587     HeapRegionRemSet::par_cleanup();
  4589     if (ParallelGCVerbose) {
  4590       MutexLocker x(stats_lock());
  4591       pss.print_termination_stats(i);
  4592     }
  4594     assert(pss.refs()->is_empty(), "should be empty");
  4595     double end_time_ms = os::elapsedTime() * 1000.0;
  4596     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
  4597   }
  4598 };
  4600 // *** Common G1 Evacuation Stuff
  4602 // This method is run in a GC worker.
  4604 void
  4605 G1CollectedHeap::
  4606 g1_process_strong_roots(bool collecting_perm_gen,
  4607                         SharedHeap::ScanningOption so,
  4608                         OopClosure* scan_non_heap_roots,
  4609                         OopsInHeapRegionClosure* scan_rs,
  4610                         OopsInGenClosure* scan_perm,
  4611                         int worker_i) {
  4612   // First scan the strong roots, including the perm gen.
  4613   double ext_roots_start = os::elapsedTime();
  4614   double closure_app_time_sec = 0.0;
  4616   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  4617   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  4618   buf_scan_perm.set_generation(perm_gen());
  4620   // Walk the code cache w/o buffering, because StarTask cannot handle
  4621   // unaligned oop locations.
  4622   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
  4624   process_strong_roots(false, // no scoping; this is parallel code
  4625                        collecting_perm_gen, so,
  4626                        &buf_scan_non_heap_roots,
  4627                        &eager_scan_code_roots,
  4628                        &buf_scan_perm);
  4630   // Finish up any enqueued closure apps.
  4631   buf_scan_non_heap_roots.done();
  4632   buf_scan_perm.done();
  4633   double ext_roots_end = os::elapsedTime();
  4634   g1_policy()->reset_obj_copy_time(worker_i);
  4635   double obj_copy_time_sec =
  4636     buf_scan_non_heap_roots.closure_app_seconds() +
  4637     buf_scan_perm.closure_app_seconds();
  4638   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  4639   double ext_root_time_ms =
  4640     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  4641   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  4643   // Scan strong roots in mark stack.
  4644   if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
  4645     concurrent_mark()->oops_do(scan_non_heap_roots);
  4646   }
  4647   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  4648   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
  4650   // XXX What should this be doing in the parallel case?
  4651   g1_policy()->record_collection_pause_end_CH_strong_roots();
  4652   // Now scan the complement of the collection set.
  4653   if (scan_rs != NULL) {
  4654     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  4655   }
  4656   // Finish with the ref_processor roots.
  4657   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4658     // We need to treat the discovered reference lists as roots and
  4659     // keep entries (which are added by the marking threads) on them
  4660     // live until they can be processed at the end of marking.
  4661     ref_processor()->weak_oops_do(scan_non_heap_roots);
  4662     ref_processor()->oops_do(scan_non_heap_roots);
  4663   }
  4664   g1_policy()->record_collection_pause_end_G1_strong_roots();
  4665   _process_strong_tasks->all_tasks_completed();
  4666 }
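The timing bookkeeping above charges the buffered closure applications to "object copy" and subtracts them from the wall-clock interval, so only the remainder is reported as external root scanning. The arithmetic reduces to (hypothetical helper):

// Wall-clock time spent in root processing, minus the seconds the buffering
// closures spent applying the copy closure, is what gets charged to external
// root scanning (returned here in milliseconds).
double ext_root_scan_ms(double roots_start_s, double roots_end_s, double closure_app_s) {
  return ((roots_end_s - roots_start_s) - closure_app_s) * 1000.0;
}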
  4668 void
  4669 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  4670                                        OopClosure* non_root_closure) {
  4671   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  4672   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  4673 }
  4676 class SaveMarksClosure: public HeapRegionClosure {
  4677 public:
  4678   bool doHeapRegion(HeapRegion* r) {
  4679     r->save_marks();
  4680     return false;
  4681   }
  4682 };
  4684 void G1CollectedHeap::save_marks() {
  4685   if (!CollectedHeap::use_parallel_gc_threads()) {
  4686     SaveMarksClosure sm;
  4687     heap_region_iterate(&sm);
  4688   }
  4689   // We do this even in the parallel case
  4690   perm_gen()->save_marks();
  4691 }
  4693 void G1CollectedHeap::evacuate_collection_set() {
  4694   set_evacuation_failed(false);
  4696   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4697   concurrent_g1_refine()->set_use_cache(false);
  4698   concurrent_g1_refine()->clear_hot_cache_claimed_index();
  4700   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  4701   set_par_threads(n_workers);
  4702   G1ParTask g1_par_task(this, n_workers, _task_queues);
  4704   init_for_evac_failure(NULL);
  4706   rem_set()->prepare_for_younger_refs_iterate(true);
  4708   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4709   double start_par = os::elapsedTime();
  4710   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4711     // The individual threads will set their evac-failure closures.
  4712     StrongRootsScope srs(this);
  4713     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  4714     workers()->run_task(&g1_par_task);
  4715   } else {
  4716     StrongRootsScope srs(this);
  4717     g1_par_task.work(0);
  4718   }
  4720   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4721   g1_policy()->record_par_time(par_time);
  4722   set_par_threads(0);
  4723   // Is this the right thing to do here?  We don't save marks
  4724   // on individual heap regions when we allocate from
  4725   // them in parallel, so this seems like the correct place for this.
  4726   retire_all_alloc_regions();
  4728   // Weak root processing.
  4729   // Note: when JSR 292 is enabled and code blobs can contain
  4730   // non-perm oops then we will need to process the code blobs
  4731   // here too.
  4732   {
  4733     G1IsAliveClosure is_alive(this);
  4734     G1KeepAliveClosure keep_alive(this);
  4735     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4736   }
  4737   release_gc_alloc_regions(false /* totally */);
  4738   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4740   concurrent_g1_refine()->clear_hot_cache();
  4741   concurrent_g1_refine()->set_use_cache(true);
  4743   finalize_for_evac_failure();
  4745   // Must do this before removing self-forwarding pointers, which clears
  4746   // the per-region evac-failure flags.
  4747   concurrent_mark()->complete_marking_in_collection_set();
  4749   if (evacuation_failed()) {
  4750     remove_self_forwarding_pointers();
  4751     if (PrintGCDetails) {
  4752       gclog_or_tty->print(" (to-space overflow)");
  4753     } else if (PrintGC) {
  4754       gclog_or_tty->print("--");
  4755     }
  4756   }
  4758   if (G1DeferredRSUpdate) {
  4759     RedirtyLoggedCardTableEntryFastClosure redirty;
  4760     dirty_card_queue_set().set_closure(&redirty);
  4761     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4763     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  4764     dcq.merge_bufferlists(&dirty_card_queue_set());
  4765     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4766   }
  4767   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  4768 }
  4770 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
  4771                                      size_t* pre_used,
  4772                                      FreeRegionList* free_list,
  4773                                      HumongousRegionSet* humongous_proxy_set,
  4774                                      HRRSCleanupTask* hrrs_cleanup_task,
  4775                                      bool par) {
  4776   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  4777     if (hr->isHumongous()) {
  4778       assert(hr->startsHumongous(), "we should only see starts humongous");
  4779       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
  4780     } else {
  4781       free_region(hr, pre_used, free_list, par);
  4782     }
  4783   } else {
  4784     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
  4785   }
  4786 }
  4788 void G1CollectedHeap::free_region(HeapRegion* hr,
  4789                                   size_t* pre_used,
  4790                                   FreeRegionList* free_list,
  4791                                   bool par) {
  4792   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  4793   assert(!hr->is_empty(), "the region should not be empty");
  4794   assert(free_list != NULL, "pre-condition");
  4796   *pre_used += hr->used();
  4797   hr->hr_clear(par, true /* clear_space */);
  4798   free_list->add_as_head(hr);
  4799 }
  4801 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  4802                                      size_t* pre_used,
  4803                                      FreeRegionList* free_list,
  4804                                      HumongousRegionSet* humongous_proxy_set,
  4805                                      bool par) {
  4806   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  4807   assert(free_list != NULL, "pre-condition");
  4808   assert(humongous_proxy_set != NULL, "pre-condition");
  4810   size_t hr_used = hr->used();
  4811   size_t hr_capacity = hr->capacity();
  4812   size_t hr_pre_used = 0;
  4813   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  4814   hr->set_notHumongous();
  4815   free_region(hr, &hr_pre_used, free_list, par);
  4817   int i = hr->hrs_index() + 1;
  4818   size_t num = 1;
  4819   while ((size_t) i < n_regions()) {
  4820     HeapRegion* curr_hr = _hrs->at(i);
  4821     if (!curr_hr->continuesHumongous()) {
  4822       break;
  4823     }
  4824     curr_hr->set_notHumongous();
  4825     free_region(curr_hr, &hr_pre_used, free_list, par);
  4826     num += 1;
  4827     i += 1;
  4828   }
  4829   assert(hr_pre_used == hr_used,
  4830          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
  4831                  "should be the same", hr_pre_used, hr_used));
  4832   *pre_used += hr_pre_used;
  4833 }
  4835 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
  4836                                        FreeRegionList* free_list,
  4837                                        HumongousRegionSet* humongous_proxy_set,
  4838                                        bool par) {
  4839   if (pre_used > 0) {
  4840     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
  4841     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  4842     assert(_summary_bytes_used >= pre_used,
  4843            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
  4844                    "should be >= pre_used: "SIZE_FORMAT,
  4845                    _summary_bytes_used, pre_used));
  4846     _summary_bytes_used -= pre_used;
  4847   }
  4848   if (free_list != NULL && !free_list->is_empty()) {
  4849     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  4850     _free_list.add_as_head(free_list);
  4851   }
  4852   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
  4853     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  4854     _humongous_set.update_from_proxy(humongous_proxy_set);
  4855   }
  4856 }
  4858 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  4859   while (list != NULL) {
  4860     guarantee( list->is_young(), "invariant" );
  4862     HeapWord* bottom = list->bottom();
  4863     HeapWord* end = list->end();
  4864     MemRegion mr(bottom, end);
  4865     ct_bs->dirty(mr);
  4867     list = list->get_next_young_region();
  4868   }
  4869 }
  4872 class G1ParCleanupCTTask : public AbstractGangTask {
  4873   CardTableModRefBS* _ct_bs;
  4874   G1CollectedHeap* _g1h;
  4875   HeapRegion* volatile _su_head;
  4876 public:
  4877   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  4878                      G1CollectedHeap* g1h,
  4879                      HeapRegion* survivor_list) :
  4880     AbstractGangTask("G1 Par Cleanup CT Task"),
  4881     _ct_bs(ct_bs),
  4882     _g1h(g1h),
  4883     _su_head(survivor_list)
  4884   { }
  4886   void work(int i) {
  4887     HeapRegion* r;
  4888     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  4889       clear_cards(r);
  4890     }
  4891     // Redirty the cards of the survivor regions.
  4892     dirty_list(&this->_su_head);
  4893   }
  4895   void clear_cards(HeapRegion* r) {
  4896     // Cards for Survivor regions will be dirtied later.
  4897     if (!r->is_survivor()) {
  4898       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  4899     }
  4900   }
  4902   void dirty_list(HeapRegion* volatile * head_ptr) {
  4903     HeapRegion* head;
  4904     do {
  4905       // Pop region off the list.
  4906       head = *head_ptr;
  4907       if (head != NULL) {
  4908         HeapRegion* r = (HeapRegion*)
  4909           Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
  4910         if (r == head) {
  4911           assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
  4912           _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
  4913         }
  4914       }
  4915     } while (*head_ptr != NULL);
  4916   }
  4917 };
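dirty_list() pops survivor regions off a shared singly-linked list with Atomic::cmpxchg_ptr instead of taking a lock: each successful compare-and-swap hands exactly one node to the calling thread. The same loop over std::atomic (Node and pop_all are hypothetical; like the original, it assumes nodes are not re-pushed while the list is being drained, so ABA is not a concern here):

#include <atomic>

struct Node {
  Node* next = nullptr;
  // ... payload ...
};

// Pop every node off a shared list; each successful compare-and-swap hands
// ownership of exactly one node to this thread.
void pop_all(std::atomic<Node*>& head, void (*handle)(Node*)) {
  Node* h = head.load();
  while (h != nullptr) {
    if (head.compare_exchange_weak(h, h->next)) {
      handle(h);           // we own h now
      h = head.load();     // start over for the next node
    }
    // On failure, compare_exchange_weak reloaded h; just retry.
  }
}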
  4920 #ifndef PRODUCT
  4921 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  4922   CardTableModRefBS* _ct_bs;
  4923 public:
  4924   G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
  4925     : _ct_bs(ct_bs) { }
  4926   virtual bool doHeapRegion(HeapRegion* r) {
  4927     MemRegion mr(r->bottom(), r->end());
  4928     if (r->is_survivor()) {
  4929       _ct_bs->verify_dirty_region(mr);
  4930     } else {
  4931       _ct_bs->verify_clean_region(mr);
  4932     }
  4933     return false;
  4934   }
  4935 };
  4937 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
  4938   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  4939   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
  4940     // We cannot guarantee that [bottom(),end()] is dirty.  Threads
  4941     // dirty allocated blocks as they allocate them. The thread that
  4942     // retires each region and replaces it with a new one will do a
  4943     // maximal allocation to fill in [pre_dummy_top(),end()] but will
  4944     // not dirty that area (one less thing to have to do while holding
  4945     // a lock). So we can only verify that [bottom(),pre_dummy_top()]
  4946     // is dirty. Also note that verify_dirty_region() requires
  4947     // mr.start() and mr.end() to be card aligned and pre_dummy_top()
  4948     // is not guaranteed to be.
  4949     MemRegion mr(hr->bottom(),
  4950                  ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
  4951     ct_bs->verify_dirty_region(mr);
  4952   }
  4953 }
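verify_dirty_young_list() has to round pre_dummy_top() up to a card boundary before handing the range to the card table, because verify_dirty_region() wants card-aligned bounds. Card alignment is plain power-of-two rounding (kCardSize and align_up_to_card are hypothetical; HotSpot's card size is 512 bytes, but any power of two works):

#include <cstdint>

constexpr std::uintptr_t kCardSize = 512;  // bytes per card; must be a power of two

inline char* align_up_to_card(char* p) {
  std::uintptr_t v = reinterpret_cast<std::uintptr_t>(p);
  v = (v + (kCardSize - 1)) & ~(kCardSize - 1);
  return reinterpret_cast<char*>(v);
}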
  4955 void G1CollectedHeap::verify_dirty_young_regions() {
  4956   verify_dirty_young_list(_young_list->first_region());
  4957   verify_dirty_young_list(_young_list->first_survivor_region());
  4958 }
  4959 #endif
  4961 void G1CollectedHeap::cleanUpCardTable() {
  4962   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  4963   double start = os::elapsedTime();
  4965   // Iterate over the dirty cards region list.
  4966   G1ParCleanupCTTask cleanup_task(ct_bs, this,
  4967                                   _young_list->first_survivor_region());
  4969   if (ParallelGCThreads > 0) {
  4970     set_par_threads(workers()->total_workers());
  4971     workers()->run_task(&cleanup_task);
  4972     set_par_threads(0);
  4973   } else {
  4974     while (_dirty_cards_region_list) {
  4975       HeapRegion* r = _dirty_cards_region_list;
  4976       cleanup_task.clear_cards(r);
  4977       _dirty_cards_region_list = r->get_next_dirty_cards_region();
  4978       if (_dirty_cards_region_list == r) {
  4979         // The last region.
  4980         _dirty_cards_region_list = NULL;
  4981       }
  4982       r->set_next_dirty_cards_region(NULL);
  4983     }
  4984     // now, redirty the cards of the survivor regions
  4985     // (it seemed faster to do it this way, instead of iterating over
  4986     // all regions and then clearing / dirtying as appropriate)
  4987     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  4988   }
  4990   double elapsed = os::elapsedTime() - start;
  4991   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  4992 #ifndef PRODUCT
  4993   if (G1VerifyCTCleanup || VerifyAfterGC) {
  4994     G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
  4995     heap_region_iterate(&cleanup_verifier);
  4996   }
  4997 #endif
  4998 }
  5000 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  5001   size_t pre_used = 0;
  5002   FreeRegionList local_free_list("Local List for CSet Freeing");
  5004   double young_time_ms     = 0.0;
  5005   double non_young_time_ms = 0.0;
  5007   // Since the collection set is a superset of the young list,
  5008   // all we need to do to clear the young list is clear its
  5009   // head and length, and unlink any young regions in the code below
  5010   _young_list->clear();
  5012   G1CollectorPolicy* policy = g1_policy();
  5014   double start_sec = os::elapsedTime();
  5015   bool non_young = true;
  5017   HeapRegion* cur = cs_head;
  5018   int age_bound = -1;
  5019   size_t rs_lengths = 0;
  5021   while (cur != NULL) {
  5022     assert(!is_on_master_free_list(cur), "sanity");
  5024     if (non_young) {
  5025       if (cur->is_young()) {
  5026         double end_sec = os::elapsedTime();
  5027         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5028         non_young_time_ms += elapsed_ms;
  5030         start_sec = os::elapsedTime();
  5031         non_young = false;
  5032       }
  5033     } else {
  5034       double end_sec = os::elapsedTime();
  5035       double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5036       young_time_ms += elapsed_ms;
  5038       start_sec = os::elapsedTime();
  5039       non_young = true;
  5040     }
  5042     rs_lengths += cur->rem_set()->occupied();
  5044     HeapRegion* next = cur->next_in_collection_set();
  5045     assert(cur->in_collection_set(), "bad CS");
  5046     cur->set_next_in_collection_set(NULL);
  5047     cur->set_in_collection_set(false);
  5049     if (cur->is_young()) {
  5050       int index = cur->young_index_in_cset();
  5051       guarantee( index != -1, "invariant" );
  5052       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
  5053       size_t words_survived = _surviving_young_words[index];
  5054       cur->record_surv_words_in_group(words_survived);
  5056       // At this point we have 'popped' cur from the collection set
  5057       // (linked via next_in_collection_set()) but it is still in the
  5058       // young list (linked via next_young_region()). Clear the
  5059       // _next_young_region field.
  5060       cur->set_next_young_region(NULL);
  5061     } else {
  5062       int index = cur->young_index_in_cset();
  5063       guarantee( index == -1, "invariant" );
  5064     }
  5066     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  5067             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5068             "invariant" );
  5070     if (!cur->evacuation_failed()) {
  5071       // The region was successfully evacuated, so free it.
  5072       assert(!cur->is_empty(), "Should not have empty regions in a CS.");
  5073       free_region(cur, &pre_used, &local_free_list, false /* par */);
  5074     } else {
  5075       cur->uninstall_surv_rate_group();
  5076       if (cur->is_young())
  5077         cur->set_young_index_in_cset(-1);
  5078       cur->set_not_young();
  5079       cur->set_evacuation_failed(false);
  5080     }
  5081     cur = next;
  5082   }
  5084   policy->record_max_rs_lengths(rs_lengths);
  5085   policy->cset_regions_freed();
  5087   double end_sec = os::elapsedTime();
  5088   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5089   if (non_young)
  5090     non_young_time_ms += elapsed_ms;
  5091   else
  5092     young_time_ms += elapsed_ms;
  5094   update_sets_after_freeing_regions(pre_used, &local_free_list,
  5095                                     NULL /* humongous_proxy_set */,
  5096                                     false /* par */);
  5097   policy->record_young_free_cset_time_ms(young_time_ms);
  5098   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
  5099 }
  5101 // This routine is similar to the above but does not record
  5102 // any policy statistics or update free lists; we are abandoning
  5103 // the current incremental collection set in preparation of a
  5104 // full collection. After the full GC we will start to build up
  5105 // the incremental collection set again.
  5106 // This is only called when we're doing a full collection
  5107 // and is immediately followed by the tearing down of the young list.
  5109 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  5110   HeapRegion* cur = cs_head;
  5112   while (cur != NULL) {
  5113     HeapRegion* next = cur->next_in_collection_set();
  5114     assert(cur->in_collection_set(), "bad CS");
  5115     cur->set_next_in_collection_set(NULL);
  5116     cur->set_in_collection_set(false);
  5117     cur->set_young_index_in_cset(-1);
  5118     cur = next;
  5119   }
  5120 }
  5122 void G1CollectedHeap::set_free_regions_coming() {
  5123   if (G1ConcRegionFreeingVerbose) {
  5124     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  5125                            "setting free regions coming");
  5126   }
  5128   assert(!free_regions_coming(), "pre-condition");
  5129   _free_regions_coming = true;
  5130 }
  5132 void G1CollectedHeap::reset_free_regions_coming() {
  5133   {
  5134     assert(free_regions_coming(), "pre-condition");
  5135     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5136     _free_regions_coming = false;
  5137     SecondaryFreeList_lock->notify_all();
  5138   }
  5140   if (G1ConcRegionFreeingVerbose) {
  5141     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  5142                            "reset free regions coming");
  5143   }
  5144 }
  5146 void G1CollectedHeap::wait_while_free_regions_coming() {
  5147   // Most of the time we won't have to wait, so let's do a quick test
  5148   // first before we take the lock.
  5149   if (!free_regions_coming()) {
  5150     return;
  5151   }
  5153   if (G1ConcRegionFreeingVerbose) {
  5154     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  5155                            "waiting for free regions");
  5156   }
  5158   {
  5159     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5160     while (free_regions_coming()) {
  5161       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  5162     }
  5163   }
  5165   if (G1ConcRegionFreeingVerbose) {
  5166     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  5167                            "done waiting for free regions");
  5168   }
  5169 }
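wait_while_free_regions_coming() does an unlocked fast-path check before taking SecondaryFreeList_lock and blocking, and the waiter re-checks the flag in a loop under the lock. The standard-library shape of that gate (FreeRegionsGate is a hypothetical name):

#include <atomic>
#include <condition_variable>
#include <mutex>

// A gate that lets most callers skip the lock entirely when no concurrent
// free-list transfer is in progress.
class FreeRegionsGate {
public:
  void set_coming() { _coming.store(true); }

  void reset_coming() {
    {
      std::lock_guard<std::mutex> g(_lock);
      _coming.store(false);
    }
    _cv.notify_all();
  }

  void wait_while_coming() {
    if (!_coming.load()) return;               // fast path: usually nothing to wait for
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [this] { return !_coming.load(); });
  }

private:
  std::mutex              _lock;
  std::condition_variable _cv;
  std::atomic<bool>       _coming{false};
};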
  5171 size_t G1CollectedHeap::n_regions() {
  5172   return _hrs->length();
  5173 }
  5175 size_t G1CollectedHeap::max_regions() {
  5176   return
  5177     (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
  5178     HeapRegion::GrainBytes;
  5179 }
  5181 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  5182   assert(heap_lock_held_for_gc(),
  5183               "the heap lock should already be held by or for this thread");
  5184   _young_list->push_region(hr);
  5185   g1_policy()->set_region_short_lived(hr);
  5186 }
  5188 class NoYoungRegionsClosure: public HeapRegionClosure {
  5189 private:
  5190   bool _success;
  5191 public:
  5192   NoYoungRegionsClosure() : _success(true) { }
  5193   bool doHeapRegion(HeapRegion* r) {
  5194     if (r->is_young()) {
  5195       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  5196                              r->bottom(), r->end());
  5197                              r->bottom(), r->end());
  5198     }
  5199     return false;
  5200   }
  5201   bool success() { return _success; }
  5202 };
  5204 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  5205   bool ret = _young_list->check_list_empty(check_sample);
  5207   if (check_heap) {
  5208     NoYoungRegionsClosure closure;
  5209     heap_region_iterate(&closure);
  5210     ret = ret && closure.success();
  5211   }
  5213   return ret;
  5214 }
  5216 void G1CollectedHeap::empty_young_list() {
  5217   assert(heap_lock_held_for_gc(),
  5218               "the heap lock should already be held by or for this thread");
  5219   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
  5221   _young_list->empty_list();
  5222 }
  5224 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  5225   bool no_allocs = true;
  5226   for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
  5227     HeapRegion* r = _gc_alloc_regions[ap];
  5228     no_allocs = r == NULL || r->saved_mark_at_top();
  5229   }
  5230   return no_allocs;
  5231 }
  5233 void G1CollectedHeap::retire_all_alloc_regions() {
  5234   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  5235     HeapRegion* r = _gc_alloc_regions[ap];
  5236     if (r != NULL) {
  5237       // Check for aliases.
  5238       bool has_processed_alias = false;
  5239       for (int i = 0; i < ap; ++i) {
  5240         if (_gc_alloc_regions[i] == r) {
  5241           has_processed_alias = true;
  5242           break;
  5243         }
  5244       }
  5245       if (!has_processed_alias) {
  5246         retire_alloc_region(r, false /* par */);
  5247       }
  5248     }
  5249   }
  5250 }
  5252 // Done at the start of full GC.
  5253 void G1CollectedHeap::tear_down_region_lists() {
  5254   _free_list.remove_all();
  5255 }
  5257 class RegionResetter: public HeapRegionClosure {
  5258   G1CollectedHeap* _g1h;
  5259   FreeRegionList _local_free_list;
  5261 public:
  5262   RegionResetter() : _g1h(G1CollectedHeap::heap()),
  5263                      _local_free_list("Local Free List for RegionResetter") { }
  5265   bool doHeapRegion(HeapRegion* r) {
  5266     if (r->continuesHumongous()) return false;
  5267     if (r->top() > r->bottom()) {
  5268       if (r->top() < r->end()) {
  5269         Copy::fill_to_words(r->top(),
  5270                           pointer_delta(r->end(), r->top()));
  5271       }
  5272     } else {
  5273       assert(r->is_empty(), "tautology");
  5274       _local_free_list.add_as_tail(r);
  5275     }
  5276     return false;
  5277   }
  5279   void update_free_lists() {
  5280     _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
  5281                                             false /* par */);
  5282   }
  5283 };
  5285 // Done at the end of full GC.
  5286 void G1CollectedHeap::rebuild_region_lists() {
  5287   // This needs to go at the end of the full GC.
  5288   RegionResetter rs;
  5289   heap_region_iterate(&rs);
  5290   rs.update_free_lists();
  5291 }
  5293 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  5294   _refine_cte_cl->set_concurrent(concurrent);
  5295 }
  5297 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5298   HeapRegion* hr = heap_region_containing(p);
  5299   if (hr == NULL) {
  5300     return is_in_permanent(p);
  5301   } else {
  5302     return hr->is_in(p);
  5303   }
  5304 }
  5306 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
  5307                                                       bool force) {
  5308   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  5309   assert(!force || g1_policy()->can_expand_young_list(),
  5310          "if force is true we should be able to expand the young list");
  5311   if (force || !g1_policy()->is_young_list_full()) {
  5312     HeapRegion* new_alloc_region = new_region(word_size,
  5313                                               false /* do_expand */);
  5314     if (new_alloc_region != NULL) {
  5315       g1_policy()->update_region_num(true /* next_is_young */);
  5316       set_region_short_lived_locked(new_alloc_region);
  5317       return new_alloc_region;
  5318     }
  5319   }
  5320   return NULL;
  5321 }
  5323 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  5324                                                   size_t allocated_bytes) {
  5325   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  5326   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  5328   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  5329   _summary_bytes_used += allocated_bytes;
  5330 }
  5332 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  5333                                                     bool force) {
  5334   return _g1h->new_mutator_alloc_region(word_size, force);
  5335 }
  5337 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  5338                                        size_t allocated_bytes) {
  5339   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  5340 }
  5342 // Heap region set verification
  5344 class VerifyRegionListsClosure : public HeapRegionClosure {
  5345 private:
  5346   HumongousRegionSet* _humongous_set;
  5347   FreeRegionList*     _free_list;
  5348   size_t              _region_count;
  5350 public:
  5351   VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
  5352                            FreeRegionList* free_list) :
  5353     _humongous_set(humongous_set), _free_list(free_list),
  5354     _region_count(0) { }
  5356   size_t region_count()      { return _region_count;      }
  5358   bool doHeapRegion(HeapRegion* hr) {
  5359     _region_count += 1;
  5361     if (hr->continuesHumongous()) {
  5362       return false;
  5363     }
  5365     if (hr->is_young()) {
  5366       // TODO
  5367     } else if (hr->startsHumongous()) {
  5368       _humongous_set->verify_next_region(hr);
  5369     } else if (hr->is_empty()) {
  5370       _free_list->verify_next_region(hr);
  5371     }
  5372     return false;
  5373   }
  5374 };
  5376 void G1CollectedHeap::verify_region_sets() {
  5377   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  5379   // First, check the explicit lists.
  5380   _free_list.verify();
  5381   {
  5382     // Given that a concurrent operation might be adding regions to
  5383     // the secondary free list we have to take the lock before
  5384     // verifying it.
  5385     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5386     _secondary_free_list.verify();
  5387   }
  5388   _humongous_set.verify();
  5390   // If a concurrent region freeing operation is in progress it will
  5391   // be difficult to correctly attribute any free regions we come
  5392   // across to the correct free list given that they might belong to
  5393   // one of several (free_list, secondary_free_list, any local lists,
  5394   // etc.). So, if that's the case we will skip the rest of the
  5395   // verification operation. Alternatively, waiting for the concurrent
  5396   // operation to complete will have a non-trivial effect on the GC's
  5397   // operation (no concurrent operation will last longer than the
  5398   // interval between two calls to verification) and it might hide
  5399   // any issues that we would like to catch during testing.
  5400   if (free_regions_coming()) {
  5401     return;
  5402   }
  5404   // Make sure we append the secondary_free_list on the free_list so
  5405   // that all free regions we will come across can be safely
  5406   // attributed to the free_list.
  5407   append_secondary_free_list_if_not_empty_with_lock();
  5409   // Finally, make sure that the region accounting in the lists is
  5410   // consistent with what we see in the heap.
  5411   _humongous_set.verify_start();
  5412   _free_list.verify_start();
  5414   VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
  5415   heap_region_iterate(&cl);
  5417   _humongous_set.verify_end();
  5418   _free_list.verify_end();
  5419 }
