src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author      tonyp
date        Tue, 25 Jan 2011 17:58:19 -0500
changeset   2493:97ba643ea3ed
parent      2472:0fa27f37d4d4
child       2504:c33825b68624
permissions -rw-r--r--

7014261: G1: RSet-related failures
Summary: A race between the concurrent cleanup thread and the VM thread while the latter is processing the "expanded sparse table list" causes both threads to try to free the same sparse table entry, which either makes one of the threads fail or leaves the entry in an inconsistent state. The solution is to purge all entries on the expanded list that correspond to regions that are being cleaned up.
Reviewed-by: brutisso, johnc
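
The shape of the fix, as described above, is roughly: before cleanup hands regions back, walk the "expanded sparse table list" and drop every entry that belongs to a region being cleaned up, so the VM thread and the concurrent cleanup thread never race to free the same entry. The sketch below illustrates that idea only; the names (ExpandedSparseEntry, purge_expanded_entries_for, being_cleaned_up) are hypothetical stand-ins, not the actual HotSpot types or functions touched by this changeset.

    #include <cstddef>

    // Hypothetical sketch of the "purge the expanded list" idea -- not the real
    // HotSpot code. Each entry on the singly-linked expanded list describes the
    // sparse remembered-set table of one region.
    struct ExpandedSparseEntry {
      int                  region_index;  // region this entry belongs to
      ExpandedSparseEntry* next;          // next entry on the expanded list
    };

    // Remove and free every entry whose region is about to be cleaned up, so the
    // concurrent cleanup path no longer sees (and tries to free) those entries.
    static ExpandedSparseEntry* purge_expanded_entries_for(ExpandedSparseEntry* head,
                                                           const bool* being_cleaned_up) {
      ExpandedSparseEntry** prev = &head;
      ExpandedSparseEntry*  curr = head;
      while (curr != NULL) {
        ExpandedSparseEntry* next = curr->next;
        if (being_cleaned_up[curr->region_index]) {
          *prev = next;   // unlink the entry...
          delete curr;    // ...and free it on this side, before cleanup runs
        } else {
          prev = &curr->next;
        }
        curr = next;
      }
      return head;
    }

Whether the purge happens under a lock or at a safepoint is exactly the kind of detail the real fix has to get right; the sketch only shows the list surgery.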

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/icBuffer.hpp"
    27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
    28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
    30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    31 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    32 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    33 #include "gc_implementation/g1/g1MarkSweep.hpp"
    34 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    35 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    38 #include "gc_implementation/g1/vm_operations_g1.hpp"
    39 #include "gc_implementation/shared/isGCActiveMark.hpp"
    40 #include "memory/gcLocker.inline.hpp"
    41 #include "memory/genOopClosures.inline.hpp"
    42 #include "memory/generationSpec.hpp"
    43 #include "oops/oop.inline.hpp"
    44 #include "oops/oop.pcgc.inline.hpp"
    45 #include "runtime/aprofiler.hpp"
    46 #include "runtime/vmThread.hpp"
    48 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
    50 // turn it on so that the contents of the young list (scan-only /
    51 // to-be-collected) are printed at "strategic" points before / during
    52 // / after the collection --- this is useful for debugging
    53 #define YOUNG_LIST_VERBOSE 0
    54 // CURRENT STATUS
    55 // This file is under construction.  Search for "FIXME".
    57 // INVARIANTS/NOTES
    58 //
    59 // All allocation activity covered by the G1CollectedHeap interface is
    60 // serialized by acquiring the HeapLock.  This happens in mem_allocate
    61 // and allocate_new_tlab, which are the "entry" points to the
    62 // allocation code from the rest of the JVM.  (Note that this does not
    63 // apply to TLAB allocation, which is not part of this interface: it
    64 // is done by clients of this interface.)
    66 // Local to this file.
    68 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    69   SuspendibleThreadSet* _sts;
    70   G1RemSet* _g1rs;
    71   ConcurrentG1Refine* _cg1r;
    72   bool _concurrent;
    73 public:
    74   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    75                               G1RemSet* g1rs,
    76                               ConcurrentG1Refine* cg1r) :
    77     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    78   {}
    79   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    80     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    81     // This path is executed by the concurrent refine or mutator threads,
    82     // concurrently, and so we do not care if card_ptr contains references
    83     // that point into the collection set.
    84     assert(!oops_into_cset, "should be");
    86     if (_concurrent && _sts->should_yield()) {
    87       // Caller will actually yield.
    88       return false;
    89     }
    90     // Otherwise, we finished successfully; return true.
    91     return true;
    92   }
    93   void set_concurrent(bool b) { _concurrent = b; }
    94 };
    97 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
    98   int _calls;
    99   G1CollectedHeap* _g1h;
   100   CardTableModRefBS* _ctbs;
   101   int _histo[256];
   102 public:
   103   ClearLoggedCardTableEntryClosure() :
   104     _calls(0)
   105   {
   106     _g1h = G1CollectedHeap::heap();
   107     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   108     for (int i = 0; i < 256; i++) _histo[i] = 0;
   109   }
   110   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   111     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   112       _calls++;
   113       unsigned char* ujb = (unsigned char*)card_ptr;
   114       int ind = (int)(*ujb);
   115       _histo[ind]++;
   116       *card_ptr = -1;
   117     }
   118     return true;
   119   }
   120   int calls() { return _calls; }
   121   void print_histo() {
   122     gclog_or_tty->print_cr("Card table value histogram:");
   123     for (int i = 0; i < 256; i++) {
   124       if (_histo[i] != 0) {
   125         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
   126       }
   127     }
   128   }
   129 };
   131 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   132   int _calls;
   133   G1CollectedHeap* _g1h;
   134   CardTableModRefBS* _ctbs;
   135 public:
   136   RedirtyLoggedCardTableEntryClosure() :
   137     _calls(0)
   138   {
   139     _g1h = G1CollectedHeap::heap();
   140     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   141   }
   142   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   143     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   144       _calls++;
   145       *card_ptr = 0;
   146     }
   147     return true;
   148   }
   149   int calls() { return _calls; }
   150 };
   152 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   153 public:
   154   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   155     *card_ptr = CardTableModRefBS::dirty_card_val();
   156     return true;
   157   }
   158 };
   160 YoungList::YoungList(G1CollectedHeap* g1h)
   161   : _g1h(g1h), _head(NULL),
   162     _length(0),
   163     _last_sampled_rs_lengths(0),
   164     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
   165 {
   166   guarantee( check_list_empty(false), "just making sure..." );
   167 }
   169 void YoungList::push_region(HeapRegion *hr) {
   170   assert(!hr->is_young(), "should not already be young");
   171   assert(hr->get_next_young_region() == NULL, "cause it should!");
   173   hr->set_next_young_region(_head);
   174   _head = hr;
   176   hr->set_young();
   177   double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
   178   ++_length;
   179 }
   181 void YoungList::add_survivor_region(HeapRegion* hr) {
   182   assert(hr->is_survivor(), "should be flagged as survivor region");
   183   assert(hr->get_next_young_region() == NULL, "cause it should!");
   185   hr->set_next_young_region(_survivor_head);
   186   if (_survivor_head == NULL) {
   187     _survivor_tail = hr;
   188   }
   189   _survivor_head = hr;
   191   ++_survivor_length;
   192 }
   194 void YoungList::empty_list(HeapRegion* list) {
   195   while (list != NULL) {
   196     HeapRegion* next = list->get_next_young_region();
   197     list->set_next_young_region(NULL);
   198     list->uninstall_surv_rate_group();
   199     list->set_not_young();
   200     list = next;
   201   }
   202 }
   204 void YoungList::empty_list() {
   205   assert(check_list_well_formed(), "young list should be well formed");
   207   empty_list(_head);
   208   _head = NULL;
   209   _length = 0;
   211   empty_list(_survivor_head);
   212   _survivor_head = NULL;
   213   _survivor_tail = NULL;
   214   _survivor_length = 0;
   216   _last_sampled_rs_lengths = 0;
   218   assert(check_list_empty(false), "just making sure...");
   219 }
   221 bool YoungList::check_list_well_formed() {
   222   bool ret = true;
   224   size_t length = 0;
   225   HeapRegion* curr = _head;
   226   HeapRegion* last = NULL;
   227   while (curr != NULL) {
   228     if (!curr->is_young()) {
   229       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   230                              "incorrectly tagged (y: %d, surv: %d)",
   231                              curr->bottom(), curr->end(),
   232                              curr->is_young(), curr->is_survivor());
   233       ret = false;
   234     }
   235     ++length;
   236     last = curr;
   237     curr = curr->get_next_young_region();
   238   }
   239   ret = ret && (length == _length);
   241   if (!ret) {
   242     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   243     gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
   244                            length, _length);
   245   }
   247   return ret;
   248 }
   250 bool YoungList::check_list_empty(bool check_sample) {
   251   bool ret = true;
   253   if (_length != 0) {
   254     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
   255                   _length);
   256     ret = false;
   257   }
   258   if (check_sample && _last_sampled_rs_lengths != 0) {
   259     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   260     ret = false;
   261   }
   262   if (_head != NULL) {
   263     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   264     ret = false;
   265   }
   266   if (!ret) {
   267     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   268   }
   270   return ret;
   271 }
   273 void
   274 YoungList::rs_length_sampling_init() {
   275   _sampled_rs_lengths = 0;
   276   _curr               = _head;
   277 }
   279 bool
   280 YoungList::rs_length_sampling_more() {
   281   return _curr != NULL;
   282 }
   284 void
   285 YoungList::rs_length_sampling_next() {
   286   assert( _curr != NULL, "invariant" );
   287   size_t rs_length = _curr->rem_set()->occupied();
   289   _sampled_rs_lengths += rs_length;
   291   // The current region may not yet have been added to the
   292   // incremental collection set (it gets added when it is
   293   // retired as the current allocation region).
   294   if (_curr->in_collection_set()) {
   295     // Update the collection set policy information for this region
   296     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   297   }
   299   _curr = _curr->get_next_young_region();
   300   if (_curr == NULL) {
   301     _last_sampled_rs_lengths = _sampled_rs_lengths;
   302     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   303   }
   304 }
   306 void
   307 YoungList::reset_auxilary_lists() {
   308   guarantee( is_empty(), "young list should be empty" );
   309   assert(check_list_well_formed(), "young list should be well formed");
   311   // Add survivor regions to SurvRateGroup.
   312   _g1h->g1_policy()->note_start_adding_survivor_regions();
   313   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   315   for (HeapRegion* curr = _survivor_head;
   316        curr != NULL;
   317        curr = curr->get_next_young_region()) {
   318     _g1h->g1_policy()->set_region_survivors(curr);
   320     // The region is a non-empty survivor so let's add it to
   321     // the incremental collection set for the next evacuation
   322     // pause.
   323     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   324   }
   325   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   327   _head   = _survivor_head;
   328   _length = _survivor_length;
   329   if (_survivor_head != NULL) {
   330     assert(_survivor_tail != NULL, "cause it shouldn't be");
   331     assert(_survivor_length > 0, "invariant");
   332     _survivor_tail->set_next_young_region(NULL);
   333   }
   335   // Don't clear the survivor list handles until the start of
   336   // the next evacuation pause - we need it in order to re-tag
   337   // the survivor regions from this evacuation pause as 'young'
   338   // at the start of the next.
   340   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   342   assert(check_list_well_formed(), "young list should be well formed");
   343 }
   345 void YoungList::print() {
   346   HeapRegion* lists[] = {_head,   _survivor_head};
   347   const char* names[] = {"YOUNG", "SURVIVOR"};
   349   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   350     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   351     HeapRegion *curr = lists[list];
   352     if (curr == NULL)
   353       gclog_or_tty->print_cr("  empty");
   354     while (curr != NULL) {
   355       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
   356                              "age: %4d, y: %d, surv: %d",
   357                              curr->bottom(), curr->end(),
   358                              curr->top(),
   359                              curr->prev_top_at_mark_start(),
   360                              curr->next_top_at_mark_start(),
   361                              curr->top_at_conc_mark_count(),
   362                              curr->age_in_surv_rate_group_cond(),
   363                              curr->is_young(),
   364                              curr->is_survivor());
   365       curr = curr->get_next_young_region();
   366     }
   367   }
   369   gclog_or_tty->print_cr("");
   370 }
   372 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   373 {
   374   // Claim the right to put the region on the dirty cards region list
   375   // by installing a self pointer.
   376   HeapRegion* next = hr->get_next_dirty_cards_region();
   377   if (next == NULL) {
   378     HeapRegion* res = (HeapRegion*)
   379       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   380                           NULL);
   381     if (res == NULL) {
   382       HeapRegion* head;
   383       do {
   384         // Put the region to the dirty cards region list.
   385         head = _dirty_cards_region_list;
   386         next = (HeapRegion*)
   387           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   388         if (next == head) {
   389           assert(hr->get_next_dirty_cards_region() == hr,
   390                  "hr->get_next_dirty_cards_region() != hr");
   391           if (next == NULL) {
   392             // The last region in the list points to itself.
   393             hr->set_next_dirty_cards_region(hr);
   394           } else {
   395             hr->set_next_dirty_cards_region(next);
   396           }
   397         }
   398       } while (next != head);
   399     }
   400   }
   401 }
   403 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   404 {
   405   HeapRegion* head;
   406   HeapRegion* hr;
   407   do {
   408     head = _dirty_cards_region_list;
   409     if (head == NULL) {
   410       return NULL;
   411     }
   412     HeapRegion* new_head = head->get_next_dirty_cards_region();
   413     if (head == new_head) {
   414       // The last region.
   415       new_head = NULL;
   416     }
   417     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   418                                           head);
   419   } while (hr != head);
   420   assert(hr != NULL, "invariant");
   421   hr->set_next_dirty_cards_region(NULL);
   422   return hr;
   423 }
   425 void G1CollectedHeap::stop_conc_gc_threads() {
   426   _cg1r->stop();
   427   _cmThread->stop();
   428 }
   430 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   431   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   432   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   434   // Count the dirty cards at the start.
   435   CountNonCleanMemRegionClosure count1(this);
   436   ct_bs->mod_card_iterate(&count1);
   437   int orig_count = count1.n();
   439   // First clear the logged cards.
   440   ClearLoggedCardTableEntryClosure clear;
   441   dcqs.set_closure(&clear);
   442   dcqs.apply_closure_to_all_completed_buffers();
   443   dcqs.iterate_closure_all_threads(false);
   444   clear.print_histo();
   446   // Now ensure that there's no dirty cards.
   447   CountNonCleanMemRegionClosure count2(this);
   448   ct_bs->mod_card_iterate(&count2);
   449   if (count2.n() != 0) {
   450     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   451                            count2.n(), orig_count);
   452   }
   453   guarantee(count2.n() == 0, "Card table should be clean.");
   455   RedirtyLoggedCardTableEntryClosure redirty;
   456   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   457   dcqs.apply_closure_to_all_completed_buffers();
   458   dcqs.iterate_closure_all_threads(false);
   459   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   460                          clear.calls(), orig_count);
   461   guarantee(redirty.calls() == clear.calls(),
   462             "Or else mechanism is broken.");
   464   CountNonCleanMemRegionClosure count3(this);
   465   ct_bs->mod_card_iterate(&count3);
   466   if (count3.n() != orig_count) {
   467     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   468                            orig_count, count3.n());
   469     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   470   }
   472   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   473 }
   475 // Private class members.
   477 G1CollectedHeap* G1CollectedHeap::_g1h;
   479 // Private methods.
   481 HeapRegion*
   482 G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
   483   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   484   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
   485     if (!_secondary_free_list.is_empty()) {
   486       if (G1ConcRegionFreeingVerbose) {
   487         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   488                                "secondary_free_list has "SIZE_FORMAT" entries",
   489                                _secondary_free_list.length());
   490       }
   491       // It looks as if there are free regions available on the
   492       // secondary_free_list. Let's move them to the free_list and try
   493       // again to allocate from it.
   494       append_secondary_free_list();
   496       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
   497              "empty we should have moved at least one entry to the free_list");
   498       HeapRegion* res = _free_list.remove_head();
   499       if (G1ConcRegionFreeingVerbose) {
   500         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   501                                "allocated "HR_FORMAT" from secondary_free_list",
   502                                HR_FORMAT_PARAMS(res));
   503       }
   504       return res;
   505     }
    507     // Wait here until we get notified either when (a) there are no
    508     // more free regions coming or (b) some regions have been moved onto
   509     // the secondary_free_list.
   510     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   511   }
   513   if (G1ConcRegionFreeingVerbose) {
   514     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   515                            "could not allocate from secondary_free_list");
   516   }
   517   return NULL;
   518 }
   520 HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
   521                                              bool do_expand) {
   522   assert(!isHumongous(word_size) ||
   523                                   word_size <= (size_t) HeapRegion::GrainWords,
   524          "the only time we use this to allocate a humongous region is "
   525          "when we are allocating a single humongous region");
   527   HeapRegion* res;
   528   if (G1StressConcRegionFreeing) {
   529     if (!_secondary_free_list.is_empty()) {
   530       if (G1ConcRegionFreeingVerbose) {
   531         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   532                                "forced to look at the secondary_free_list");
   533       }
   534       res = new_region_try_secondary_free_list(word_size);
   535       if (res != NULL) {
   536         return res;
   537       }
   538     }
   539   }
   540   res = _free_list.remove_head_or_null();
   541   if (res == NULL) {
   542     if (G1ConcRegionFreeingVerbose) {
   543       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
   544                              "res == NULL, trying the secondary_free_list");
   545     }
   546     res = new_region_try_secondary_free_list(word_size);
   547   }
   548   if (res == NULL && do_expand) {
   549     expand(word_size * HeapWordSize);
   550     res = _free_list.remove_head_or_null();
   551   }
   552   if (res != NULL) {
   553     if (G1PrintHeapRegions) {
   554       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
   555                              "top "PTR_FORMAT, res->hrs_index(),
   556                              res->bottom(), res->end(), res->top());
   557     }
   558   }
   559   return res;
   560 }
   562 HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
   563                                                  size_t word_size) {
   564   HeapRegion* alloc_region = NULL;
   565   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   566     alloc_region = new_region_work(word_size, true /* do_expand */);
   567     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   568       alloc_region->set_survivor();
   569     }
   570     ++_gc_alloc_region_counts[purpose];
   571   } else {
   572     g1_policy()->note_alloc_region_limit_reached(purpose);
   573   }
   574   return alloc_region;
   575 }
   577 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
   578                                                        size_t word_size) {
   579   int first = -1;
   580   if (num_regions == 1) {
   581     // Only one region to allocate, no need to go through the slower
    582     // path. The caller will attempt the expansion if this fails, so
   583     // let's not try to expand here too.
   584     HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
   585     if (hr != NULL) {
   586       first = hr->hrs_index();
   587     } else {
   588       first = -1;
   589     }
   590   } else {
   591     // We can't allocate humongous regions while cleanupComplete() is
   592     // running, since some of the regions we find to be empty might not
   593     // yet be added to the free list and it is not straightforward to
   594     // know which list they are on so that we can remove them. Note
   595     // that we only need to do this if we need to allocate more than
   596     // one region to satisfy the current humongous allocation
   597     // request. If we are only allocating one region we use the common
   598     // region allocation code (see above).
   599     wait_while_free_regions_coming();
   600     append_secondary_free_list_if_not_empty();
   602     if (free_regions() >= num_regions) {
   603       first = _hrs->find_contiguous(num_regions);
   604       if (first != -1) {
   605         for (int i = first; i < first + (int) num_regions; ++i) {
   606           HeapRegion* hr = _hrs->at(i);
   607           assert(hr->is_empty(), "sanity");
   608           assert(is_on_free_list(hr), "sanity");
   609           hr->set_pending_removal(true);
   610         }
   611         _free_list.remove_all_pending(num_regions);
   612       }
   613     }
   614   }
   615   return first;
   616 }
   618 // If could fit into free regions w/o expansion, try.
   619 // Otherwise, if can expand, do so.
   620 // Otherwise, if using ex regions might help, try with ex given back.
   621 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   622   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   624   verify_region_sets_optional();
   626   size_t num_regions =
   627          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   628   size_t x_size = expansion_regions();
   629   size_t fs = _hrs->free_suffix();
   630   int first = humongous_obj_allocate_find_first(num_regions, word_size);
   631   if (first == -1) {
   632     // The only thing we can do now is attempt expansion.
   633     if (fs + x_size >= num_regions) {
   634       expand((num_regions - fs) * HeapRegion::GrainBytes);
   635       first = humongous_obj_allocate_find_first(num_regions, word_size);
   636       assert(first != -1, "this should have worked");
   637     }
   638   }
   640   if (first != -1) {
   641     // Index of last region in the series + 1.
   642     int last = first + (int) num_regions;
   644     // We need to initialize the region(s) we just discovered. This is
   645     // a bit tricky given that it can happen concurrently with
   646     // refinement threads refining cards on these regions and
   647     // potentially wanting to refine the BOT as they are scanning
   648     // those cards (this can happen shortly after a cleanup; see CR
   649     // 6991377). So we have to set up the region(s) carefully and in
   650     // a specific order.
   652     // The word size sum of all the regions we will allocate.
   653     size_t word_size_sum = num_regions * HeapRegion::GrainWords;
   654     assert(word_size <= word_size_sum, "sanity");
   656     // This will be the "starts humongous" region.
   657     HeapRegion* first_hr = _hrs->at(first);
   658     // The header of the new object will be placed at the bottom of
   659     // the first region.
   660     HeapWord* new_obj = first_hr->bottom();
   661     // This will be the new end of the first region in the series that
    662     // should also match the end of the last region in the series.
   663     HeapWord* new_end = new_obj + word_size_sum;
   664     // This will be the new top of the first region that will reflect
   665     // this allocation.
   666     HeapWord* new_top = new_obj + word_size;
   668     // First, we need to zero the header of the space that we will be
   669     // allocating. When we update top further down, some refinement
   670     // threads might try to scan the region. By zeroing the header we
   671     // ensure that any thread that will try to scan the region will
   672     // come across the zero klass word and bail out.
   673     //
   674     // NOTE: It would not have been correct to have used
   675     // CollectedHeap::fill_with_object() and make the space look like
   676     // an int array. The thread that is doing the allocation will
   677     // later update the object header to a potentially different array
   678     // type and, for a very short period of time, the klass and length
   679     // fields will be inconsistent. This could cause a refinement
   680     // thread to calculate the object size incorrectly.
   681     Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
   683     // We will set up the first region as "starts humongous". This
   684     // will also update the BOT covering all the regions to reflect
   685     // that there is a single object that starts at the bottom of the
   686     // first region.
   687     first_hr->set_startsHumongous(new_top, new_end);
   689     // Then, if there are any, we will set up the "continues
   690     // humongous" regions.
   691     HeapRegion* hr = NULL;
   692     for (int i = first + 1; i < last; ++i) {
   693       hr = _hrs->at(i);
   694       hr->set_continuesHumongous(first_hr);
   695     }
   696     // If we have "continues humongous" regions (hr != NULL), then the
   697     // end of the last one should match new_end.
   698     assert(hr == NULL || hr->end() == new_end, "sanity");
   700     // Up to this point no concurrent thread would have been able to
   701     // do any scanning on any region in this series. All the top
   702     // fields still point to bottom, so the intersection between
   703     // [bottom,top] and [card_start,card_end] will be empty. Before we
   704     // update the top fields, we'll do a storestore to make sure that
   705     // no thread sees the update to top before the zeroing of the
   706     // object header and the BOT initialization.
   707     OrderAccess::storestore();
   709     // Now that the BOT and the object header have been initialized,
   710     // we can update top of the "starts humongous" region.
   711     assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
   712            "new_top should be in this region");
   713     first_hr->set_top(new_top);
   715     // Now, we will update the top fields of the "continues humongous"
   716     // regions. The reason we need to do this is that, otherwise,
   717     // these regions would look empty and this will confuse parts of
   718     // G1. For example, the code that looks for a consecutive number
   719     // of empty regions will consider them empty and try to
   720     // re-allocate them. We can extend is_empty() to also include
   721     // !continuesHumongous(), but it is easier to just update the top
   722     // fields here. The way we set top for all regions (i.e., top ==
   723     // end for all regions but the last one, top == new_top for the
   724     // last one) is actually used when we will free up the humongous
   725     // region in free_humongous_region().
   726     hr = NULL;
   727     for (int i = first + 1; i < last; ++i) {
   728       hr = _hrs->at(i);
   729       if ((i + 1) == last) {
   730         // last continues humongous region
   731         assert(hr->bottom() < new_top && new_top <= hr->end(),
   732                "new_top should fall on this region");
   733         hr->set_top(new_top);
   734       } else {
   735         // not last one
   736         assert(new_top > hr->end(), "new_top should be above this region");
   737         hr->set_top(hr->end());
   738       }
   739     }
   740     // If we have continues humongous regions (hr != NULL), then the
   741     // end of the last one should match new_end and its top should
   742     // match new_top.
   743     assert(hr == NULL ||
   744            (hr->end() == new_end && hr->top() == new_top), "sanity");
   746     assert(first_hr->used() == word_size * HeapWordSize, "invariant");
   747     _summary_bytes_used += first_hr->used();
   748     _humongous_set.add(first_hr);
   750     return new_obj;
   751   }
   753   verify_region_sets_optional();
   754   return NULL;
   755 }
   757 void
   758 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
   759   // Other threads might still be trying to allocate using CASes out
   760   // of the region we are retiring, as they can do so without holding
    761   // the Heap_lock. So we first have to make sure that no one else can
   762   // allocate in it by doing a maximal allocation. Even if our CAS
   763   // attempt fails a few times, we'll succeed sooner or later given
    764   // that a failed CAS attempt means that the region is getting close
   765   // to being full (someone else succeeded in allocating into it).
   766   size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
   768   // This is the minimum free chunk we can turn into a dummy
    769   // object. If the free space falls below this, then no one can
   770   // allocate in this region anyway (all allocation requests will be
   771   // of a size larger than this) so we won't have to perform the dummy
   772   // allocation.
   773   size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
   775   while (free_word_size >= min_word_size_to_fill) {
   776     HeapWord* dummy =
   777       cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
   778     if (dummy != NULL) {
   779       // If the allocation was successful we should fill in the space.
   780       CollectedHeap::fill_with_object(dummy, free_word_size);
   781       break;
   782     }
   784     free_word_size = cur_alloc_region->free() / HeapWordSize;
   785     // It's also possible that someone else beats us to the
   786     // allocation and they fill up the region. In that case, we can
   787     // just get out of the loop
   788   }
   789   assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
   790          "sanity");
   792   retire_cur_alloc_region_common(cur_alloc_region);
   793   assert(_cur_alloc_region == NULL, "post-condition");
   794 }
   796 // See the comment in the .hpp file about the locking protocol and
   797 // assumptions of this method (and other related ones).
   798 HeapWord*
   799 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
   800                                                        bool at_safepoint,
   801                                                        bool do_dirtying,
   802                                                        bool can_expand) {
   803   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   804   assert(_cur_alloc_region == NULL,
   805          "replace_cur_alloc_region_and_allocate() should only be called "
   806          "after retiring the previous current alloc region");
   807   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   808          "at_safepoint and is_at_safepoint() should be a tautology");
   809   assert(!can_expand || g1_policy()->can_expand_young_list(),
   810          "we should not call this method with can_expand == true if "
   811          "we are not allowed to expand the young gen");
   813   if (can_expand || !g1_policy()->is_young_list_full()) {
   814     HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
   815     if (new_cur_alloc_region != NULL) {
   816       assert(new_cur_alloc_region->is_empty(),
   817              "the newly-allocated region should be empty, "
   818              "as right now we only allocate new regions out of the free list");
   819       g1_policy()->update_region_num(true /* next_is_young */);
   820       set_region_short_lived_locked(new_cur_alloc_region);
   822       assert(!new_cur_alloc_region->isHumongous(),
   823              "Catch a regression of this bug.");
   825       // We need to ensure that the stores to _cur_alloc_region and,
   826       // subsequently, to top do not float above the setting of the
   827       // young type.
   828       OrderAccess::storestore();
   830       // Now, perform the allocation out of the region we just
    831       // allocated. Note that no one else can access that region at
   832       // this point (as _cur_alloc_region has not been updated yet),
   833       // so we can just go ahead and do the allocation without any
   834       // atomics (and we expect this allocation attempt to
    835       // succeed). Given that other threads can attempt an allocation
   836       // with a CAS and without needing the Heap_lock, if we assigned
   837       // the new region to _cur_alloc_region before first allocating
   838       // into it other threads might have filled up the new region
   839       // before we got a chance to do the allocation ourselves. In
   840       // that case, we would have needed to retire the region, grab a
   841       // new one, and go through all this again. Allocating out of the
   842       // new region before assigning it to _cur_alloc_region avoids
   843       // all this.
   844       HeapWord* result =
   845                      new_cur_alloc_region->allocate_no_bot_updates(word_size);
   846       assert(result != NULL, "we just allocate out of an empty region "
   847              "so allocation should have been successful");
   848       assert(is_in(result), "result should be in the heap");
   850       // Now make sure that the store to _cur_alloc_region does not
   851       // float above the store to top.
   852       OrderAccess::storestore();
   853       _cur_alloc_region = new_cur_alloc_region;
   855       if (!at_safepoint) {
   856         Heap_lock->unlock();
   857       }
   859       // do the dirtying, if necessary, after we release the Heap_lock
   860       if (do_dirtying) {
   861         dirty_young_block(result, word_size);
   862       }
   863       return result;
   864     }
   865   }
   867   assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
   868          "alloc region, it should still be NULL");
   869   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   870   return NULL;
   871 }
   873 // See the comment in the .hpp file about the locking protocol and
   874 // assumptions of this method (and other related ones).
   875 HeapWord*
   876 G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   877   assert_heap_locked_and_not_at_safepoint();
   878   assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
   879          "used for humongous allocations");
   881   // We should only reach here when we were unable to allocate
    882   // otherwise. So, we should have no active current alloc region.
   883   assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
   885   // We will loop while succeeded is false, which means that we tried
   886   // to do a collection, but the VM op did not succeed. So, when we
   887   // exit the loop, either one of the allocation attempts was
    888   // successful, or we succeeded in doing the VM op but it was
   889   // unable to allocate after the collection.
   890   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   891     bool succeeded = true;
   893     // Every time we go round the loop we should be holding the Heap_lock.
   894     assert_heap_locked();
   896     if (GC_locker::is_active_and_needs_gc()) {
   897       // We are locked out of GC because of the GC locker. We can
   898       // allocate a new region only if we can expand the young gen.
   900       if (g1_policy()->can_expand_young_list()) {
   901         // Yes, we are allowed to expand the young gen. Let's try to
   902         // allocate a new current alloc region.
   903         HeapWord* result =
   904           replace_cur_alloc_region_and_allocate(word_size,
   905                                                 false, /* at_safepoint */
   906                                                 true,  /* do_dirtying */
   907                                                 true   /* can_expand */);
   908         if (result != NULL) {
   909           assert_heap_not_locked();
   910           return result;
   911         }
   912       }
   913       // We could not expand the young gen further (or we could but we
   914       // failed to allocate a new region). We'll stall until the GC
   915       // locker forces a GC.
   917       // If this thread is not in a jni critical section, we stall
   918       // the requestor until the critical section has cleared and
   919       // GC allowed. When the critical section clears, a GC is
   920       // initiated by the last thread exiting the critical section; so
   921       // we retry the allocation sequence from the beginning of the loop,
   922       // rather than causing more, now probably unnecessary, GC attempts.
   923       JavaThread* jthr = JavaThread::current();
   924       assert(jthr != NULL, "sanity");
   925       if (jthr->in_critical()) {
   926         if (CheckJNICalls) {
   927           fatal("Possible deadlock due to allocating while"
   928                 " in jni critical section");
   929         }
   930         // We are returning NULL so the protocol is that we're still
   931         // holding the Heap_lock.
   932         assert_heap_locked();
   933         return NULL;
   934       }
   936       Heap_lock->unlock();
   937       GC_locker::stall_until_clear();
   939       // No need to relock the Heap_lock. We'll fall off to the code
   940       // below the else-statement which assumes that we are not
   941       // holding the Heap_lock.
   942     } else {
   943       // We are not locked out. So, let's try to do a GC. The VM op
   944       // will retry the allocation before it completes.
   946       // Read the GC count while holding the Heap_lock
   947       unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   949       Heap_lock->unlock();
   951       HeapWord* result =
   952         do_collection_pause(word_size, gc_count_before, &succeeded);
   953       assert_heap_not_locked();
   954       if (result != NULL) {
   955         assert(succeeded, "the VM op should have succeeded");
   957         // Allocations that take place on VM operations do not do any
   958         // card dirtying and we have to do it here.
   959         dirty_young_block(result, word_size);
   960         return result;
   961       }
   962     }
   964     // Both paths that get us here from above unlock the Heap_lock.
   965     assert_heap_not_locked();
   967     // We can reach here when we were unsuccessful in doing a GC,
   968     // because another thread beat us to it, or because we were locked
   969     // out of GC due to the GC locker. In either case a new alloc
   970     // region might be available so we will retry the allocation.
   971     HeapWord* result = attempt_allocation(word_size);
   972     if (result != NULL) {
   973       assert_heap_not_locked();
   974       return result;
   975     }
   977     // So far our attempts to allocate failed. The only time we'll go
   978     // around the loop and try again is if we tried to do a GC and the
   979     // VM op that we tried to schedule was not successful because
   980     // another thread beat us to it. If that happened it's possible
   981     // that by the time we grabbed the Heap_lock again and tried to
   982     // allocate other threads filled up the young generation, which
   983     // means that the allocation attempt after the GC also failed. So,
   984     // it's worth trying to schedule another GC pause.
   985     if (succeeded) {
   986       break;
   987     }
   989     // Give a warning if we seem to be looping forever.
   990     if ((QueuedAllocationWarningCount > 0) &&
   991         (try_count % QueuedAllocationWarningCount == 0)) {
   992       warning("G1CollectedHeap::attempt_allocation_slow() "
   993               "retries %d times", try_count);
   994     }
   995   }
   997   assert_heap_locked();
   998   return NULL;
   999 }
  1001 // See the comment in the .hpp file about the locking protocol and
  1002 // assumptions of this method (and other related ones).
  1003 HeapWord*
  1004 G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
  1005                                               bool at_safepoint) {
  1006   // This is the method that will allocate a humongous object. All
  1007   // allocation paths that attempt to allocate a humongous object
  1008   // should eventually reach here. Currently, the only paths are from
  1009   // mem_allocate() and attempt_allocation_at_safepoint().
  1010   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  1011   assert(isHumongous(word_size), "attempt_allocation_humongous() "
  1012          "should only be used for humongous allocations");
  1013   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
  1014          "at_safepoint and is_at_safepoint() should be a tautology");
  1016   HeapWord* result = NULL;
  1018   // We will loop while succeeded is false, which means that we tried
  1019   // to do a collection, but the VM op did not succeed. So, when we
  1020   // exit the loop, either one of the allocation attempts was
   1021   // successful, or we succeeded in doing the VM op but it was
  1022   // unable to allocate after the collection.
  1023   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
  1024     bool succeeded = true;
  1026     // Given that humongous objects are not allocated in young
  1027     // regions, we'll first try to do the allocation without doing a
  1028     // collection hoping that there's enough space in the heap.
  1029     result = humongous_obj_allocate(word_size);
  1030     assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
  1031            "catch a regression of this bug.");
  1032     if (result != NULL) {
  1033       if (!at_safepoint) {
  1034         // If we're not at a safepoint, unlock the Heap_lock.
  1035         Heap_lock->unlock();
   1036       }
   1037       return result;
   1038     }
  1040     // If we failed to allocate the humongous object, we should try to
  1041     // do a collection pause (if we're allowed) in case it reclaims
  1042     // enough space for the allocation to succeed after the pause.
  1043     if (!at_safepoint) {
  1044       // Read the GC count while holding the Heap_lock
  1045       unsigned int gc_count_before = SharedHeap::heap()->total_collections();
  1047       // If we're allowed to do a collection we're not at a
  1048       // safepoint, so it is safe to unlock the Heap_lock.
  1049       Heap_lock->unlock();
  1051       result = do_collection_pause(word_size, gc_count_before, &succeeded);
  1052       assert_heap_not_locked();
  1053       if (result != NULL) {
  1054         assert(succeeded, "the VM op should have succeeded");
   1055         return result;
   1056       }
  1058       // If we get here, the VM operation either did not succeed
  1059       // (i.e., another thread beat us to it) or it succeeded but
  1060       // failed to allocate the object.
  1062       // If we're allowed to do a collection we're not at a
  1063       // safepoint, so it is safe to lock the Heap_lock.
   1064       Heap_lock->lock();
   1065     }
  1067     assert(result == NULL, "otherwise we should have exited the loop earlier");
  1069     // So far our attempts to allocate failed. The only time we'll go
  1070     // around the loop and try again is if we tried to do a GC and the
  1071     // VM op that we tried to schedule was not successful because
  1072     // another thread beat us to it. That way it's possible that some
  1073     // space was freed up by the thread that successfully scheduled a
  1074     // GC. So it's worth trying to allocate again.
  1075     if (succeeded) {
   1076       break;
   1077     }
  1079     // Give a warning if we seem to be looping forever.
  1080     if ((QueuedAllocationWarningCount > 0) &&
  1081         (try_count % QueuedAllocationWarningCount == 0)) {
  1082       warning("G1CollectedHeap::attempt_allocation_humongous "
  1083               "retries %d times", try_count);
  1087   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   1088   return NULL;
   1089 }
  1091 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
  1092                                            bool expect_null_cur_alloc_region) {
  1093   assert_at_safepoint(true /* should_be_vm_thread */);
  1094   assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
  1095          err_msg("the current alloc region was unexpectedly found "
  1096                  "to be non-NULL, cur alloc region: "PTR_FORMAT" "
  1097                  "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
  1098                  _cur_alloc_region, expect_null_cur_alloc_region, word_size));
  1100   if (!isHumongous(word_size)) {
  1101     if (!expect_null_cur_alloc_region) {
  1102       HeapRegion* cur_alloc_region = _cur_alloc_region;
  1103       if (cur_alloc_region != NULL) {
  1104         // We are at a safepoint so no reason to use the MT-safe version.
  1105         HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
  1106         if (result != NULL) {
  1107           assert(is_in(result), "result should be in the heap");
  1109           // We will not do any dirtying here. This is guaranteed to be
  1110           // called during a safepoint and the thread that scheduled the
  1111           // pause will do the dirtying if we return a non-NULL result.
   1112           return result;
   1113         }
   1115         retire_cur_alloc_region_common(cur_alloc_region);
   1116       }
   1117     }
  1119     assert(_cur_alloc_region == NULL,
  1120            "at this point we should have no cur alloc region");
  1121     return replace_cur_alloc_region_and_allocate(word_size,
  1122                                                  true, /* at_safepoint */
  1123                                                  false /* do_dirtying */,
  1124                                                  false /* can_expand */);
  1125   } else {
  1126     return attempt_allocation_humongous(word_size,
   1127                                         true /* at_safepoint */);
   1128   }
   1130   ShouldNotReachHere();
   1131 }
  1133 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  1134   assert_heap_not_locked_and_not_at_safepoint();
  1135   assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
  1137   // First attempt: Try allocating out of the current alloc region
  1138   // using a CAS. If that fails, take the Heap_lock and retry the
  1139   // allocation, potentially replacing the current alloc region.
  1140   HeapWord* result = attempt_allocation(word_size);
  1141   if (result != NULL) {
  1142     assert_heap_not_locked();
   1143     return result;
   1144   }
  1146   // Second attempt: Go to the slower path where we might try to
  1147   // schedule a collection.
  1148   result = attempt_allocation_slow(word_size);
  1149   if (result != NULL) {
  1150     assert_heap_not_locked();
   1151     return result;
   1152   }
  1154   assert_heap_locked();
  1155   // Need to unlock the Heap_lock before returning.
  1156   Heap_lock->unlock();
   1157   return NULL;
   1158 }
  1160 HeapWord*
  1161 G1CollectedHeap::mem_allocate(size_t word_size,
  1162                               bool   is_noref,
  1163                               bool   is_tlab,
  1164                               bool*  gc_overhead_limit_was_exceeded) {
  1165   assert_heap_not_locked_and_not_at_safepoint();
  1166   assert(!is_tlab, "mem_allocate() this should not be called directly "
  1167          "to allocate TLABs");
   1169   // Loop until the allocation is satisfied,
  1170   // or unsatisfied after GC.
  1171   for (int try_count = 1; /* we'll return */; try_count += 1) {
   1172     unsigned int gc_count_before;
   1173     {
  1174       if (!isHumongous(word_size)) {
  1175         // First attempt: Try allocating out of the current alloc region
  1176         // using a CAS. If that fails, take the Heap_lock and retry the
  1177         // allocation, potentially replacing the current alloc region.
  1178         HeapWord* result = attempt_allocation(word_size);
  1179         if (result != NULL) {
  1180           assert_heap_not_locked();
   1181           return result;
   1182         }
  1184         assert_heap_locked();
  1186         // Second attempt: Go to the slower path where we might try to
  1187         // schedule a collection.
  1188         result = attempt_allocation_slow(word_size);
  1189         if (result != NULL) {
  1190           assert_heap_not_locked();
   1191           return result;
   1192         }
  1193       } else {
  1194         // attempt_allocation_humongous() requires the Heap_lock to be held.
  1195         Heap_lock->lock();
  1197         HeapWord* result = attempt_allocation_humongous(word_size,
  1198                                                      false /* at_safepoint */);
  1199         if (result != NULL) {
  1200           assert_heap_not_locked();
   1201           return result;
   1202         }
   1203       }
  1205       assert_heap_locked();
  1206       // Read the gc count while the heap lock is held.
  1207       gc_count_before = SharedHeap::heap()->total_collections();
  1209       // Release the Heap_lock before attempting the collection.
   1210       Heap_lock->unlock();
   1211     }
  1213     // Create the garbage collection operation...
  1214     VM_G1CollectForAllocation op(gc_count_before, word_size);
  1215     // ...and get the VM thread to execute it.
  1216     VMThread::execute(&op);
  1218     assert_heap_not_locked();
  1219     if (op.prologue_succeeded() && op.pause_succeeded()) {
  1220       // If the operation was successful we'll return the result even
  1221       // if it is NULL. If the allocation attempt failed immediately
  1222       // after a Full GC, it's unlikely we'll be able to allocate now.
  1223       HeapWord* result = op.result();
  1224       if (result != NULL && !isHumongous(word_size)) {
  1225         // Allocations that take place on VM operations do not do any
  1226         // card dirtying and we have to do it here. We only have to do
  1227         // this for non-humongous allocations, though.
   1228         dirty_young_block(result, word_size);
   1229       }
  1230       return result;
  1231     } else {
  1232       assert(op.result() == NULL,
  1233              "the result should be NULL if the VM op did not succeed");
  1236     // Give a warning if we seem to be looping forever.
  1237     if ((QueuedAllocationWarningCount > 0) &&
  1238         (try_count % QueuedAllocationWarningCount == 0)) {
   1239       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
   1240     }
   1241   }
   1243   ShouldNotReachHere();
   1244 }
  1246 void G1CollectedHeap::abandon_cur_alloc_region() {
  1247   assert_at_safepoint(true /* should_be_vm_thread */);
  1249   HeapRegion* cur_alloc_region = _cur_alloc_region;
  1250   if (cur_alloc_region != NULL) {
  1251     assert(!cur_alloc_region->is_empty(),
  1252            "the current alloc region can never be empty");
  1253     assert(cur_alloc_region->is_young(),
  1254            "the current alloc region should be young");
   1256     retire_cur_alloc_region_common(cur_alloc_region);
   1257   }
   1258   assert(_cur_alloc_region == NULL, "post-condition");
   1259 }
  1261 void G1CollectedHeap::abandon_gc_alloc_regions() {
  1262   // first, make sure that the GC alloc region list is empty (it should!)
  1263   assert(_gc_alloc_region_list == NULL, "invariant");
   1264   release_gc_alloc_regions(true /* totally */);
   1265 }
  1267 class PostMCRemSetClearClosure: public HeapRegionClosure {
  1268   ModRefBarrierSet* _mr_bs;
  1269 public:
  1270   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  1271   bool doHeapRegion(HeapRegion* r) {
  1272     r->reset_gc_time_stamp();
  1273     if (r->continuesHumongous())
  1274       return false;
  1275     HeapRegionRemSet* hrrs = r->rem_set();
  1276     if (hrrs != NULL) hrrs->clear();
  1277     // You might think here that we could clear just the cards
  1278     // corresponding to the used region.  But no: if we leave a dirty card
  1279     // in a region we might allocate into, then it would prevent that card
  1280     // from being enqueued, and cause it to be missed.
  1281     // Re: the performance cost: we shouldn't be doing full GC anyway!
  1282     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
   1283     return false;
   1284   }
  1285 };
  1288 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  1289   ModRefBarrierSet* _mr_bs;
  1290 public:
  1291   PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  1292   bool doHeapRegion(HeapRegion* r) {
  1293     if (r->continuesHumongous()) return false;
  1294     if (r->used_region().word_size() != 0) {
  1295       _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
  1297     return false;
  1299 };
  1301 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  1302   G1CollectedHeap*   _g1h;
  1303   UpdateRSOopClosure _cl;
  1304   int                _worker_i;
  1305 public:
  1306   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
  1307     _cl(g1->g1_rem_set(), worker_i),
  1308     _worker_i(worker_i),
  1309     _g1h(g1)
  1310   { }
  1312   bool doHeapRegion(HeapRegion* r) {
  1313     if (!r->continuesHumongous()) {
  1314       _cl.set_from(r);
  1315       r->oop_iterate(&_cl);
  1317     return false;
  1319 };
  1321 class ParRebuildRSTask: public AbstractGangTask {
  1322   G1CollectedHeap* _g1;
  1323 public:
  1324   ParRebuildRSTask(G1CollectedHeap* g1)
  1325     : AbstractGangTask("ParRebuildRSTask"),
  1326       _g1(g1)
  1327   { }
  1329   void work(int i) {
  1330     RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
  1331     _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
  1332                                          HeapRegion::RebuildRSClaimValue);
  1334 };
  1336 bool G1CollectedHeap::do_collection(bool explicit_gc,
  1337                                     bool clear_all_soft_refs,
  1338                                     size_t word_size) {
  1339   assert_at_safepoint(true /* should_be_vm_thread */);
  1341   if (GC_locker::check_active_before_gc()) {
  1342     return false;
  1345   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1346   ResourceMark rm;
  1348   if (PrintHeapAtGC) {
  1349     Universe::print_heap_before_gc();
  1352   verify_region_sets_optional();
  1354   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1355                            collector_policy()->should_clear_all_soft_refs();
  1357   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
  1360     IsGCActiveMark x;
  1362     // Timing
  1363     bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
  1364     assert(!system_gc || explicit_gc, "invariant");
  1365     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  1366     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  1367     TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
  1368                 PrintGC, true, gclog_or_tty);
  1370     TraceMemoryManagerStats tms(true /* fullGC */);
  1372     double start = os::elapsedTime();
  1373     g1_policy()->record_full_collection_start();
  1375     wait_while_free_regions_coming();
  1376     append_secondary_free_list_if_not_empty();
  1378     gc_prologue(true);
  1379     increment_total_collections(true /* full gc */);
  1381     size_t g1h_prev_used = used();
  1382     assert(used() == recalculate_used(), "Should be equal");
  1384     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  1385       HandleMark hm;  // Discard invalid handles created during verification
  1386       prepare_for_verify();
  1387       gclog_or_tty->print(" VerifyBeforeGC:");
  1388       Universe::verify(true);
  1391     COMPILER2_PRESENT(DerivedPointerTable::clear());
  1393     // We want to discover references, but not process them yet.
  1394     // This mode is disabled in
  1395     // instanceRefKlass::process_discovered_references if the
  1396     // generation does some collection work, or
  1397     // instanceRefKlass::enqueue_discovered_references if the
  1398     // generation returns without doing any work.
  1399     ref_processor()->disable_discovery();
  1400     ref_processor()->abandon_partial_discovery();
  1401     ref_processor()->verify_no_references_recorded();
  1403     // Abandon current iterations of concurrent marking and concurrent
  1404     // refinement, if any are in progress.
  1405     concurrent_mark()->abort();
  1407     // Make sure we'll choose a new allocation region afterwards.
  1408     abandon_cur_alloc_region();
  1409     abandon_gc_alloc_regions();
  1410     assert(_cur_alloc_region == NULL, "Invariant.");
  1411     g1_rem_set()->cleanupHRRS();
  1412     tear_down_region_lists();
  1414     // We may have added regions to the current incremental collection
  1415     // set between the last GC or pause and now. We need to clear the
  1416     // incremental collection set and then start rebuilding it afresh
  1417     // after this full GC.
  1418     abandon_collection_set(g1_policy()->inc_cset_head());
  1419     g1_policy()->clear_incremental_cset();
  1420     g1_policy()->stop_incremental_cset_building();
  1422     if (g1_policy()->in_young_gc_mode()) {
  1423       empty_young_list();
  1424       g1_policy()->set_full_young_gcs(true);
  1427     // See the comment in G1CollectedHeap::ref_processing_init() about
  1428     // how reference processing currently works in G1.
  1430     // Temporarily make reference _discovery_ single threaded (non-MT).
  1431     ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
  1433     // Temporarily make refs discovery atomic
  1434     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
  1436     // Temporarily clear _is_alive_non_header
  1437     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
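            // Note: these three stack-allocated mutators save the previous
            // settings and restore them when they go out of scope at the end
            // of this block, so the reference processor reverts to its normal
            // (MT, non-atomic) discovery configuration after the Full GC.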
  1439     ref_processor()->enable_discovery();
  1440     ref_processor()->setup_policy(do_clear_all_soft_refs);
  1442     // Do collection work
  1444       HandleMark hm;  // Discard invalid handles created during gc
  1445       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
  1447     assert(free_regions() == 0, "we should not have added any free regions");
  1448     rebuild_region_lists();
  1450     _summary_bytes_used = recalculate_used();
  1452     ref_processor()->enqueue_discovered_references();
  1454     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1456     MemoryService::track_memory_usage();
  1458     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  1459       HandleMark hm;  // Discard invalid handles created during verification
  1460       gclog_or_tty->print(" VerifyAfterGC:");
  1461       prepare_for_verify();
  1462       Universe::verify(false);
  1464     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  1466     reset_gc_time_stamp();
  1467     // Since everything potentially moved, we will clear all remembered
   1468     // sets, and clear all cards.  Later we will rebuild remembered
  1469     // sets. We will also reset the GC time stamps of the regions.
  1470     PostMCRemSetClearClosure rs_clear(mr_bs());
  1471     heap_region_iterate(&rs_clear);
  1473     // Resize the heap if necessary.
  1474     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1476     if (_cg1r->use_cache()) {
  1477       _cg1r->clear_and_record_card_counts();
  1478       _cg1r->clear_hot_cache();
  1481     // Rebuild remembered sets of all regions.
  1483     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1484       ParRebuildRSTask rebuild_rs_task(this);
  1485       assert(check_heap_region_claim_values(
  1486              HeapRegion::InitialClaimValue), "sanity check");
  1487       set_par_threads(workers()->total_workers());
  1488       workers()->run_task(&rebuild_rs_task);
  1489       set_par_threads(0);
  1490       assert(check_heap_region_claim_values(
  1491              HeapRegion::RebuildRSClaimValue), "sanity check");
  1492       reset_heap_region_claim_values();
  1493     } else {
  1494       RebuildRSOutOfRegionClosure rebuild_rs(this);
  1495       heap_region_iterate(&rebuild_rs);
  1498     if (PrintGC) {
  1499       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  1502     if (true) { // FIXME
  1503       // Ask the permanent generation to adjust size for full collections
  1504       perm()->compute_new_size();
  1507     // Start a new incremental collection set for the next pause
  1508     assert(g1_policy()->collection_set() == NULL, "must be");
  1509     g1_policy()->start_incremental_cset_building();
  1511     // Clear the _cset_fast_test bitmap in anticipation of adding
  1512     // regions to the incremental collection set for the next
  1513     // evacuation pause.
  1514     clear_cset_fast_test();
  1516     double end = os::elapsedTime();
  1517     g1_policy()->record_full_collection_end();
  1519 #ifdef TRACESPINNING
  1520     ParallelTaskTerminator::print_termination_counts();
  1521 #endif
  1523     gc_epilogue(true);
  1525     // Discard all rset updates
  1526     JavaThread::dirty_card_queue_set().abandon_logs();
  1527     assert(!G1DeferredRSUpdate
  1528            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1531   if (g1_policy()->in_young_gc_mode()) {
  1532     _young_list->reset_sampled_info();
  1533     // At this point there should be no regions in the
  1534     // entire heap tagged as young.
  1535     assert( check_young_list_empty(true /* check_heap */),
  1536             "young list should be empty at this point");
  1539   // Update the number of full collections that have been completed.
  1540   increment_full_collections_completed(false /* concurrent */);
  1542   verify_region_sets_optional();
  1544   if (PrintHeapAtGC) {
  1545     Universe::print_heap_after_gc();
  1548   return true;
  1551 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1552   // do_collection() will return whether it succeeded in performing
   1553   // the GC. Currently, there is no facility in the
   1554   // do_full_collection() API to notify the caller that the collection
  1555   // did not succeed (e.g., because it was locked out by the GC
  1556   // locker). So, right now, we'll ignore the return value.
  1557   bool dummy = do_collection(true,                /* explicit_gc */
  1558                              clear_all_soft_refs,
  1559                              0                    /* word_size */);
  1562 // This code is mostly copied from TenuredGeneration.
  1563 void
  1564 G1CollectedHeap::
  1565 resize_if_necessary_after_full_collection(size_t word_size) {
  1566   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1568   // Include the current allocation, if any, and bytes that will be
  1569   // pre-allocated to support collections, as "used".
  1570   const size_t used_after_gc = used();
  1571   const size_t capacity_after_gc = capacity();
  1572   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1574   // This is enforced in arguments.cpp.
  1575   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1576          "otherwise the code below doesn't make sense");
  1578   // We don't have floating point command-line arguments
  1579   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1580   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1581   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1582   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1584   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1585   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1587   // We have to be careful here as these two calculations can overflow
  1588   // 32-bit size_t's.
  1589   double used_after_gc_d = (double) used_after_gc;
  1590   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1591   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
  1593   // Let's make sure that they are both under the max heap size, which
  1594   // by default will make them fit into a size_t.
  1595   double desired_capacity_upper_bound = (double) max_heap_size;
  1596   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1597                                     desired_capacity_upper_bound);
  1598   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1599                                     desired_capacity_upper_bound);
  1601   // We can now safely turn them into size_t's.
  1602   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1603   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
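          // Worked example (illustrative numbers only): with MinHeapFreeRatio = 40,
          // MaxHeapFreeRatio = 70 and used_after_gc = 600M we get
          //   minimum_desired_capacity = 600M / (1.0 - 0.40) = 1000M
          //   maximum_desired_capacity = 600M / (1.0 - 0.70) = 2000M
          // before the clamping against the min / max heap sizes below.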
  1605   // This assert only makes sense here, before we adjust them
  1606   // with respect to the min and max heap size.
  1607   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1608          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1609                  "maximum_desired_capacity = "SIZE_FORMAT,
  1610                  minimum_desired_capacity, maximum_desired_capacity));
  1612   // Should not be greater than the heap max size. No need to adjust
  1613   // it with respect to the heap min size as it's a lower bound (i.e.,
  1614   // we'll try to make the capacity larger than it, not smaller).
  1615   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1616   // Should not be less than the heap min size. No need to adjust it
  1617   // with respect to the heap max size as it's an upper bound (i.e.,
  1618   // we'll try to make the capacity smaller than it, not greater).
  1619   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
  1621   if (PrintGC && Verbose) {
  1622     const double free_percentage =
  1623       (double) free_after_gc / (double) capacity_after_gc;
  1624     gclog_or_tty->print_cr("Computing new size after full GC ");
  1625     gclog_or_tty->print_cr("  "
  1626                            "  minimum_free_percentage: %6.2f",
  1627                            minimum_free_percentage);
  1628     gclog_or_tty->print_cr("  "
  1629                            "  maximum_free_percentage: %6.2f",
  1630                            maximum_free_percentage);
  1631     gclog_or_tty->print_cr("  "
  1632                            "  capacity: %6.1fK"
  1633                            "  minimum_desired_capacity: %6.1fK"
  1634                            "  maximum_desired_capacity: %6.1fK",
  1635                            (double) capacity_after_gc / (double) K,
  1636                            (double) minimum_desired_capacity / (double) K,
  1637                            (double) maximum_desired_capacity / (double) K);
  1638     gclog_or_tty->print_cr("  "
  1639                            "  free_after_gc: %6.1fK"
  1640                            "  used_after_gc: %6.1fK",
  1641                            (double) free_after_gc / (double) K,
  1642                            (double) used_after_gc / (double) K);
  1643     gclog_or_tty->print_cr("  "
  1644                            "   free_percentage: %6.2f",
  1645                            free_percentage);
  1647   if (capacity_after_gc < minimum_desired_capacity) {
  1648     // Don't expand unless it's significant
  1649     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1650     expand(expand_bytes);
  1651     if (PrintGC && Verbose) {
  1652       gclog_or_tty->print_cr("  "
  1653                              "  expanding:"
  1654                              "  max_heap_size: %6.1fK"
  1655                              "  minimum_desired_capacity: %6.1fK"
  1656                              "  expand_bytes: %6.1fK",
  1657                              (double) max_heap_size / (double) K,
  1658                              (double) minimum_desired_capacity / (double) K,
  1659                              (double) expand_bytes / (double) K);
  1662     // No expansion, now see if we want to shrink
  1663   } else if (capacity_after_gc > maximum_desired_capacity) {
  1664     // Capacity too large, compute shrinking size
  1665     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1666     shrink(shrink_bytes);
  1667     if (PrintGC && Verbose) {
  1668       gclog_or_tty->print_cr("  "
  1669                              "  shrinking:"
  1670                              "  min_heap_size: %6.1fK"
  1671                              "  maximum_desired_capacity: %6.1fK"
  1672                              "  shrink_bytes: %6.1fK",
  1673                              (double) min_heap_size / (double) K,
  1674                              (double) maximum_desired_capacity / (double) K,
  1675                              (double) shrink_bytes / (double) K);
  1681 HeapWord*
  1682 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1683                                            bool* succeeded) {
  1684   assert_at_safepoint(true /* should_be_vm_thread */);
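          // The overall strategy below: retry the allocation as is, then try
          // expanding the heap, then a Full GC, then a Full GC that also clears
          // soft references, re-attempting the allocation after each step.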
  1686   *succeeded = true;
  1687   // Let's attempt the allocation first.
  1688   HeapWord* result = attempt_allocation_at_safepoint(word_size,
  1689                                      false /* expect_null_cur_alloc_region */);
  1690   if (result != NULL) {
  1691     assert(*succeeded, "sanity");
  1692     return result;
  1695   // In a G1 heap, we're supposed to keep allocation from failing by
  1696   // incremental pauses.  Therefore, at least for now, we'll favor
  1697   // expansion over collection.  (This might change in the future if we can
  1698   // do something smarter than full collection to satisfy a failed alloc.)
  1699   result = expand_and_allocate(word_size);
  1700   if (result != NULL) {
  1701     assert(*succeeded, "sanity");
  1702     return result;
  1705   // Expansion didn't work, we'll try to do a Full GC.
  1706   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1707                                     false, /* clear_all_soft_refs */
  1708                                     word_size);
  1709   if (!gc_succeeded) {
  1710     *succeeded = false;
  1711     return NULL;
  1714   // Retry the allocation
  1715   result = attempt_allocation_at_safepoint(word_size,
  1716                                       true /* expect_null_cur_alloc_region */);
  1717   if (result != NULL) {
  1718     assert(*succeeded, "sanity");
  1719     return result;
  1722   // Then, try a Full GC that will collect all soft references.
  1723   gc_succeeded = do_collection(false, /* explicit_gc */
  1724                                true,  /* clear_all_soft_refs */
  1725                                word_size);
  1726   if (!gc_succeeded) {
  1727     *succeeded = false;
  1728     return NULL;
  1731   // Retry the allocation once more
  1732   result = attempt_allocation_at_safepoint(word_size,
  1733                                       true /* expect_null_cur_alloc_region */);
  1734   if (result != NULL) {
  1735     assert(*succeeded, "sanity");
  1736     return result;
  1739   assert(!collector_policy()->should_clear_all_soft_refs(),
  1740          "Flag should have been handled and cleared prior to this point");
  1742   // What else?  We might try synchronous finalization later.  If the total
  1743   // space available is large enough for the allocation, then a more
  1744   // complete compaction phase than we've tried so far might be
  1745   // appropriate.
  1746   assert(*succeeded, "sanity");
  1747   return NULL;
   1750 // Attempt to expand the heap sufficiently
  1751 // to support an allocation of the given "word_size".  If
  1752 // successful, perform the allocation and return the address of the
  1753 // allocated block, or else "NULL".
  1755 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1756   assert_at_safepoint(true /* should_be_vm_thread */);
  1758   verify_region_sets_optional();
  1760   size_t expand_bytes = word_size * HeapWordSize;
  1761   if (expand_bytes < MinHeapDeltaBytes) {
  1762     expand_bytes = MinHeapDeltaBytes;
  1764   expand(expand_bytes);
  1766   verify_region_sets_optional();
  1768   return attempt_allocation_at_safepoint(word_size,
  1769                                      false /* expect_null_cur_alloc_region */);
  1772 // FIXME: both this and shrink could probably be more efficient by
  1773 // doing one "VirtualSpace::expand_by" call rather than several.
  1774 void G1CollectedHeap::expand(size_t expand_bytes) {
  1775   size_t old_mem_size = _g1_storage.committed_size();
  1776   // We expand by a minimum of 1K.
  1777   expand_bytes = MAX2(expand_bytes, (size_t)K);
  1778   size_t aligned_expand_bytes =
  1779     ReservedSpace::page_align_size_up(expand_bytes);
  1780   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1781                                        HeapRegion::GrainBytes);
  1782   expand_bytes = aligned_expand_bytes;
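          // For example (illustration only): with 1M regions, a request of
          // 2500K is rounded up to 3M, so the loop below will commit three
          // whole regions.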
  1783   while (expand_bytes > 0) {
  1784     HeapWord* base = (HeapWord*)_g1_storage.high();
  1785     // Commit more storage.
  1786     bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
  1787     if (!successful) {
  1788         expand_bytes = 0;
  1789     } else {
  1790       expand_bytes -= HeapRegion::GrainBytes;
  1791       // Expand the committed region.
  1792       HeapWord* high = (HeapWord*) _g1_storage.high();
  1793       _g1_committed.set_end(high);
  1794       // Create a new HeapRegion.
  1795       MemRegion mr(base, high);
  1796       bool is_zeroed = !_g1_max_committed.contains(base);
  1797       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
  1799       // Now update max_committed if necessary.
  1800       _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
  1802       // Add it to the HeapRegionSeq.
  1803       _hrs->insert(hr);
  1804       _free_list.add_as_tail(hr);
  1805       // And we used up an expansion region to create it.
  1806       _expansion_regions--;
  1807       // Tell the cardtable about it.
  1808       Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1809       // And the offset table as well.
  1810       _bot_shared->resize(_g1_committed.word_size());
  1814   if (Verbose && PrintGC) {
  1815     size_t new_mem_size = _g1_storage.committed_size();
  1816     gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
  1817                            old_mem_size/K, aligned_expand_bytes/K,
  1818                            new_mem_size/K);
  1822 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
  1824   size_t old_mem_size = _g1_storage.committed_size();
  1825   size_t aligned_shrink_bytes =
  1826     ReservedSpace::page_align_size_down(shrink_bytes);
  1827   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1828                                          HeapRegion::GrainBytes);
  1829   size_t num_regions_deleted = 0;
  1830   MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
  1832   assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1833   if (mr.byte_size() > 0)
  1834     _g1_storage.shrink_by(mr.byte_size());
  1835   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1837   _g1_committed.set_end(mr.start());
  1838   _expansion_regions += num_regions_deleted;
  1840   // Tell the cardtable about it.
  1841   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1843   // And the offset table as well.
  1844   _bot_shared->resize(_g1_committed.word_size());
  1846   HeapRegionRemSet::shrink_heap(n_regions());
  1848   if (Verbose && PrintGC) {
  1849     size_t new_mem_size = _g1_storage.committed_size();
  1850     gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
  1851                            old_mem_size/K, aligned_shrink_bytes/K,
  1852                            new_mem_size/K);
  1856 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1857   verify_region_sets_optional();
  1859   release_gc_alloc_regions(true /* totally */);
  1860   // Instead of tearing down / rebuilding the free lists here, we
   1861   // could use the remove_all_pending() method on free_list to
  1862   // remove only the ones that we need to remove.
  1863   tear_down_region_lists();  // We will rebuild them in a moment.
  1864   shrink_helper(shrink_bytes);
  1865   rebuild_region_lists();
  1867   verify_region_sets_optional();
  1870 // Public methods.
  1872 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1873 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1874 #endif // _MSC_VER
  1877 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1878   SharedHeap(policy_),
  1879   _g1_policy(policy_),
  1880   _dirty_card_queue_set(false),
  1881   _into_cset_dirty_card_queue_set(false),
  1882   _is_alive_closure(this),
  1883   _ref_processor(NULL),
  1884   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1885   _bot_shared(NULL),
  1886   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1887   _evac_failure_scan_stack(NULL) ,
  1888   _mark_in_progress(false),
  1889   _cg1r(NULL), _summary_bytes_used(0),
  1890   _cur_alloc_region(NULL),
  1891   _refine_cte_cl(NULL),
  1892   _full_collection(false),
  1893   _free_list("Master Free List"),
  1894   _secondary_free_list("Secondary Free List"),
  1895   _humongous_set("Master Humongous Set"),
  1896   _free_regions_coming(false),
  1897   _young_list(new YoungList(this)),
  1898   _gc_time_stamp(0),
  1899   _surviving_young_words(NULL),
  1900   _full_collections_completed(0),
  1901   _in_cset_fast_test(NULL),
  1902   _in_cset_fast_test_base(NULL),
  1903   _dirty_cards_region_list(NULL) {
  1904   _g1h = this; // To catch bugs.
  1905   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1906     vm_exit_during_initialization("Failed necessary allocation.");
  1909   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
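          // In other words, roughly any allocation of half a region or more is
          // treated as humongous. For example (illustration only): with a 1M
          // region on a 64-bit VM, GrainWords is 128K words, so the threshold
          // is 64K words (512K bytes).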
  1911   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1912   _task_queues = new RefToScanQueueSet(n_queues);
  1914   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1915   assert(n_rem_sets > 0, "Invariant.");
  1917   HeapRegionRemSetIterator** iter_arr =
  1918     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  1919   for (int i = 0; i < n_queues; i++) {
  1920     iter_arr[i] = new HeapRegionRemSetIterator();
  1922   _rem_set_iterator = iter_arr;
  1924   for (int i = 0; i < n_queues; i++) {
  1925     RefToScanQueue* q = new RefToScanQueue();
  1926     q->initialize();
  1927     _task_queues->register_queue(i, q);
  1930   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  1931     _gc_alloc_regions[ap]          = NULL;
  1932     _gc_alloc_region_counts[ap]    = 0;
  1933     _retained_gc_alloc_regions[ap] = NULL;
  1934     // by default, we do not retain a GC alloc region for each ap;
  1935     // we'll override this, when appropriate, below
  1936     _retain_gc_alloc_region[ap]    = false;
  1939   // We will try to remember the last half-full tenured region we
  1940   // allocated to at the end of a collection so that we can re-use it
  1941   // during the next collection.
  1942   _retain_gc_alloc_region[GCAllocForTenured]  = true;
  1944   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1947 jint G1CollectedHeap::initialize() {
  1948   CollectedHeap::pre_initialize();
  1949   os::enable_vtime();
  1951   // Necessary to satisfy locking discipline assertions.
  1953   MutexLocker x(Heap_lock);
  1955   // While there are no constraints in the GC code that HeapWordSize
  1956   // be any particular value, there are multiple other areas in the
  1957   // system which believe this to be true (e.g. oop->object_size in some
  1958   // cases incorrectly returns the size in wordSize units rather than
  1959   // HeapWordSize).
  1960   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1962   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1963   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1965   // Ensure that the sizes are properly aligned.
  1966   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1967   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1969   _cg1r = new ConcurrentG1Refine();
  1971   // Reserve the maximum.
  1972   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1973   // Includes the perm-gen.
  1975   const size_t total_reserved = max_byte_size + pgs->max_size();
  1976   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  1978   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
  1979                         HeapRegion::GrainBytes,
  1980                         UseLargePages, addr);
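          // If compressed oops are in use and the preferred base could not be
          // reserved, the code below retries with progressively less favorable
          // narrow-oop modes: an unscaled base first, then a zero-based one,
          // and finally an arbitrary (heap-based) one.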
  1982   if (UseCompressedOops) {
  1983     if (addr != NULL && !heap_rs.is_reserved()) {
   1984       // Failed to reserve at the specified address - the requested memory
   1985       // region is already taken, for example, by the 'java' launcher.
   1986       // Try again to reserve the heap higher.
  1987       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  1988       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  1989                              UseLargePages, addr);
  1990       if (addr != NULL && !heap_rs0.is_reserved()) {
  1991         // Failed to reserve at specified address again - give up.
  1992         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  1993         assert(addr == NULL, "");
  1994         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  1995                                UseLargePages, addr);
  1996         heap_rs = heap_rs1;
  1997       } else {
  1998         heap_rs = heap_rs0;
  2003   if (!heap_rs.is_reserved()) {
  2004     vm_exit_during_initialization("Could not reserve enough space for object heap");
  2005     return JNI_ENOMEM;
  2008   // It is important to do this in a way such that concurrent readers can't
   2009   // temporarily think something is in the heap.  (I've actually seen this
  2010   // happen in asserts: DLD.)
  2011   _reserved.set_word_size(0);
  2012   _reserved.set_start((HeapWord*)heap_rs.base());
  2013   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  2015   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  2017   // Create the gen rem set (and barrier set) for the entire reserved region.
  2018   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  2019   set_barrier_set(rem_set()->bs());
  2020   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  2021     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  2022   } else {
  2023     vm_exit_during_initialization("G1 requires a mod ref bs.");
  2024     return JNI_ENOMEM;
  2027   // Also create a G1 rem set.
  2028   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  2029     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  2030   } else {
  2031     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  2032     return JNI_ENOMEM;
  2035   // Carve out the G1 part of the heap.
  2037   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  2038   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  2039                            g1_rs.size()/HeapWordSize);
  2040   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  2042   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  2044   _g1_storage.initialize(g1_rs, 0);
  2045   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  2046   _g1_max_committed = _g1_committed;
  2047   _hrs = new HeapRegionSeq(_expansion_regions);
  2048   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  2049   guarantee(_cur_alloc_region == NULL, "from constructor");
  2051   // 6843694 - ensure that the maximum region index can fit
  2052   // in the remembered set structures.
  2053   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  2054   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  2056   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  2057   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  2058   guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
  2059             "too many cards per region");
  2061   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
  2063   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  2064                                              heap_word_size(init_byte_size));
  2066   _g1h = this;
  2068    _in_cset_fast_test_length = max_regions();
  2069    _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  2071    // We're biasing _in_cset_fast_test to avoid subtracting the
  2072    // beginning of the heap every time we want to index; basically
   2073    // it's the same as what we do with the card table.
  2074    _in_cset_fast_test = _in_cset_fast_test_base -
  2075                 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
  2077    // Clear the _cset_fast_test bitmap in anticipation of adding
  2078    // regions to the incremental collection set for the first
  2079    // evacuation pause.
  2080    clear_cset_fast_test();
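           // Sketch of how the biased pointer is used (assuming the accessor in
           // g1CollectedHeap.hpp): a lookup for a heap address addr reads
           //   _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes]
           // which, thanks to the bias computed above, is the same element as
           // _in_cset_fast_test_base[region index of addr], with no explicit
           // subtraction of the heap base on the fast path.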
  2082   // Create the ConcurrentMark data structure and thread.
  2083   // (Must do this late, so that "max_regions" is defined.)
  2084   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  2085   _cmThread = _cm->cmThread();
  2087   // Initialize the from_card cache structure of HeapRegionRemSet.
  2088   HeapRegionRemSet::init_heap(max_regions());
  2090   // Now expand into the initial heap size.
  2091   expand(init_byte_size);
  2093   // Perform any initialization actions delegated to the policy.
  2094   g1_policy()->init();
  2096   g1_policy()->note_start_of_mark_thread();
  2098   _refine_cte_cl =
  2099     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  2100                                     g1_rem_set(),
  2101                                     concurrent_g1_refine());
  2102   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  2104   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  2105                                                SATB_Q_FL_lock,
  2106                                                G1SATBProcessCompletedThreshold,
  2107                                                Shared_SATB_Q_lock);
  2109   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2110                                                 DirtyCardQ_FL_lock,
  2111                                                 concurrent_g1_refine()->yellow_zone(),
  2112                                                 concurrent_g1_refine()->red_zone(),
  2113                                                 Shared_DirtyCardQ_lock);
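          // The yellow and red zones passed above are buffer-count thresholds:
          // roughly, once the number of completed buffers exceeds the yellow
          // zone the concurrent refinement threads are expected to keep up, and
          // once it exceeds the red zone mutator threads process buffers
          // themselves (see concurrentG1Refine.hpp for the precise semantics).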
  2115   if (G1DeferredRSUpdate) {
  2116     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2117                                       DirtyCardQ_FL_lock,
  2118                                       -1, // never trigger processing
  2119                                       -1, // no limit on length
  2120                                       Shared_DirtyCardQ_lock,
  2121                                       &JavaThread::dirty_card_queue_set());
  2124   // Initialize the card queue set used to hold cards containing
  2125   // references into the collection set.
  2126   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
  2127                                              DirtyCardQ_FL_lock,
  2128                                              -1, // never trigger processing
  2129                                              -1, // no limit on length
  2130                                              Shared_DirtyCardQ_lock,
  2131                                              &JavaThread::dirty_card_queue_set());
  2133   // In case we're keeping closure specialization stats, initialize those
  2134   // counts and that mechanism.
  2135   SpecializationStats::clear();
  2137   _gc_alloc_region_list = NULL;
  2139   // Do later initialization work for concurrent refinement.
  2140   _cg1r->init();
  2142   return JNI_OK;
  2145 void G1CollectedHeap::ref_processing_init() {
  2146   // Reference processing in G1 currently works as follows:
  2147   //
  2148   // * There is only one reference processor instance that
  2149   //   'spans' the entire heap. It is created by the code
  2150   //   below.
  2151   // * Reference discovery is not enabled during an incremental
  2152   //   pause (see 6484982).
   2153   // * Discovered refs are not enqueued nor are they processed
  2154   //   during an incremental pause (see 6484982).
  2155   // * Reference discovery is enabled at initial marking.
  2156   // * Reference discovery is disabled and the discovered
  2157   //   references processed etc during remarking.
  2158   // * Reference discovery is MT (see below).
  2159   // * Reference discovery requires a barrier (see below).
  2160   // * Reference processing is currently not MT (see 6608385).
  2161   // * A full GC enables (non-MT) reference discovery and
  2162   //   processes any discovered references.
  2164   SharedHeap::ref_processing_init();
  2165   MemRegion mr = reserved_region();
  2166   _ref_processor = ReferenceProcessor::create_ref_processor(
  2167                                          mr,    // span
  2168                                          false, // Reference discovery is not atomic
  2169                                          true,  // mt_discovery
  2170                                          &_is_alive_closure, // is alive closure
  2171                                                              // for efficiency
  2172                                          ParallelGCThreads,
  2173                                          ParallelRefProcEnabled,
  2174                                          true); // Setting next fields of discovered
  2175                                                 // lists requires a barrier.
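          // Note how this ties in with do_collection() above: discovery here is
          // configured as non-atomic and MT, which is why setting the next
          // fields of discovered lists needs a barrier, while a Full GC
          // temporarily switches to atomic, single-threaded discovery via the
          // ReferenceProcessor*Mutator objects before invoking G1MarkSweep.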
  2178 size_t G1CollectedHeap::capacity() const {
  2179   return _g1_committed.byte_size();
  2182 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2183                                                  DirtyCardQueue* into_cset_dcq,
  2184                                                  bool concurrent,
  2185                                                  int worker_i) {
  2186   // Clean cards in the hot card cache
  2187   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
  2189   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2190   int n_completed_buffers = 0;
  2191   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2192     n_completed_buffers++;
  2194   g1_policy()->record_update_rs_processed_buffers(worker_i,
  2195                                                   (double) n_completed_buffers);
  2196   dcqs.clear_n_completed_buffers();
  2197   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2201 // Computes the sum of the storage used by the various regions.
  2203 size_t G1CollectedHeap::used() const {
  2204   assert(Heap_lock->owner() != NULL,
  2205          "Should be owned on this thread's behalf.");
  2206   size_t result = _summary_bytes_used;
  2207   // Read only once in case it is set to NULL concurrently
  2208   HeapRegion* hr = _cur_alloc_region;
  2209   if (hr != NULL)
  2210     result += hr->used();
  2211   return result;
  2214 size_t G1CollectedHeap::used_unlocked() const {
  2215   size_t result = _summary_bytes_used;
  2216   return result;
  2219 class SumUsedClosure: public HeapRegionClosure {
  2220   size_t _used;
  2221 public:
  2222   SumUsedClosure() : _used(0) {}
  2223   bool doHeapRegion(HeapRegion* r) {
  2224     if (!r->continuesHumongous()) {
  2225       _used += r->used();
  2227     return false;
  2229   size_t result() { return _used; }
  2230 };
  2232 size_t G1CollectedHeap::recalculate_used() const {
  2233   SumUsedClosure blk;
  2234   _hrs->iterate(&blk);
  2235   return blk.result();
  2238 #ifndef PRODUCT
  2239 class SumUsedRegionsClosure: public HeapRegionClosure {
  2240   size_t _num;
  2241 public:
  2242   SumUsedRegionsClosure() : _num(0) {}
  2243   bool doHeapRegion(HeapRegion* r) {
  2244     if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
  2245       _num += 1;
  2247     return false;
  2249   size_t result() { return _num; }
  2250 };
  2252 size_t G1CollectedHeap::recalculate_used_regions() const {
  2253   SumUsedRegionsClosure blk;
  2254   _hrs->iterate(&blk);
  2255   return blk.result();
  2257 #endif // PRODUCT
  2259 size_t G1CollectedHeap::unsafe_max_alloc() {
  2260   if (free_regions() > 0) return HeapRegion::GrainBytes;
  2261   // otherwise, is there space in the current allocation region?
  2263   // We need to store the current allocation region in a local variable
  2264   // here. The problem is that this method doesn't take any locks and
  2265   // there may be other threads which overwrite the current allocation
  2266   // region field. attempt_allocation(), for example, sets it to NULL
  2267   // and this can happen *after* the NULL check here but before the call
  2268   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2269   // to be a problem in the optimized build, since the two loads of the
  2270   // current allocation region field are optimized away.
  2271   HeapRegion* car = _cur_alloc_region;
  2273   // FIXME: should iterate over all regions?
  2274   if (car == NULL) {
  2275     return 0;
  2277   return car->free();
  2280 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  2281   return
  2282     ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
  2283      (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  2286 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
  2287   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2289   // We assume that if concurrent == true, then the caller is a
   2290   // concurrent thread that has joined the Suspendible Thread
  2291   // Set. If there's ever a cheap way to check this, we should add an
  2292   // assert here.
  2294   // We have already incremented _total_full_collections at the start
  2295   // of the GC, so total_full_collections() represents how many full
  2296   // collections have been started.
  2297   unsigned int full_collections_started = total_full_collections();
  2299   // Given that this method is called at the end of a Full GC or of a
  2300   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2301   // interrupt a concurrent cycle), the number of full collections
  2302   // completed should be either one (in the case where there was no
  2303   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2304   // behind the number of full collections started.
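          // For example (illustration only): suppose N cycles have completed and
          // a concurrent cycle starts (started == N + 1), which a Full GC then
          // interrupts (started == N + 2). The Full GC (inner caller) reaches
          // this point with the counter still at N (the "+2" case); the aborted
          // concurrent cycle (outer caller) reaches it afterwards with the
          // counter at N + 1 (the "+1" case).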
  2306   // This is the case for the inner caller, i.e. a Full GC.
  2307   assert(concurrent ||
  2308          (full_collections_started == _full_collections_completed + 1) ||
  2309          (full_collections_started == _full_collections_completed + 2),
  2310          err_msg("for inner caller (Full GC): full_collections_started = %u "
  2311                  "is inconsistent with _full_collections_completed = %u",
  2312                  full_collections_started, _full_collections_completed));
  2314   // This is the case for the outer caller, i.e. the concurrent cycle.
  2315   assert(!concurrent ||
  2316          (full_collections_started == _full_collections_completed + 1),
  2317          err_msg("for outer caller (concurrent cycle): "
  2318                  "full_collections_started = %u "
  2319                  "is inconsistent with _full_collections_completed = %u",
  2320                  full_collections_started, _full_collections_completed));
  2322   _full_collections_completed += 1;
  2324   // We need to clear the "in_progress" flag in the CM thread before
   2325   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  2326   // is set) so that if a waiter requests another System.gc() it doesn't
   2327   // incorrectly see that a marking cycle is still in progress.
  2328   if (concurrent) {
  2329     _cmThread->clear_in_progress();
  2332   // This notify_all() will ensure that a thread that called
   2333   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
   2334   // and is waiting for a full GC to finish will be woken up. It is
  2335   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2336   FullGCCount_lock->notify_all();
  2339 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  2340   assert_at_safepoint(true /* should_be_vm_thread */);
  2341   GCCauseSetter gcs(this, cause);
  2342   switch (cause) {
  2343     case GCCause::_heap_inspection:
  2344     case GCCause::_heap_dump: {
  2345       HandleMark hm;
  2346       do_full_collection(false);         // don't clear all soft refs
  2347       break;
  2349     default: // XXX FIX ME
  2350       ShouldNotReachHere(); // Unexpected use of this function
  2354 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2355   // The caller doesn't have the Heap_lock
  2356   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  2358   unsigned int gc_count_before;
  2359   unsigned int full_gc_count_before;
  2361     MutexLocker ml(Heap_lock);
  2363     // Read the GC count while holding the Heap_lock
  2364     gc_count_before = SharedHeap::heap()->total_collections();
  2365     full_gc_count_before = SharedHeap::heap()->total_full_collections();
  2368   if (should_do_concurrent_full_gc(cause)) {
  2369     // Schedule an initial-mark evacuation pause that will start a
  2370     // concurrent cycle. We're setting word_size to 0 which means that
  2371     // we are not requesting a post-GC allocation.
  2372     VM_G1IncCollectionPause op(gc_count_before,
  2373                                0,     /* word_size */
  2374                                true,  /* should_initiate_conc_mark */
  2375                                g1_policy()->max_pause_time_ms(),
  2376                                cause);
  2377     VMThread::execute(&op);
  2378   } else {
  2379     if (cause == GCCause::_gc_locker
  2380         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2382       // Schedule a standard evacuation pause. We're setting word_size
  2383       // to 0 which means that we are not requesting a post-GC allocation.
  2384       VM_G1IncCollectionPause op(gc_count_before,
  2385                                  0,     /* word_size */
  2386                                  false, /* should_initiate_conc_mark */
  2387                                  g1_policy()->max_pause_time_ms(),
  2388                                  cause);
  2389       VMThread::execute(&op);
  2390     } else {
  2391       // Schedule a Full GC.
  2392       VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
  2393       VMThread::execute(&op);
  2398 bool G1CollectedHeap::is_in(const void* p) const {
  2399   if (_g1_committed.contains(p)) {
  2400     HeapRegion* hr = _hrs->addr_to_region(p);
  2401     return hr->is_in(p);
  2402   } else {
  2403     return _perm_gen->as_gen()->is_in(p);
  2407 // Iteration functions.
  2409 // Iterates an OopClosure over all ref-containing fields of objects
  2410 // within a HeapRegion.
  2412 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2413   MemRegion _mr;
  2414   OopClosure* _cl;
  2415 public:
  2416   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  2417     : _mr(mr), _cl(cl) {}
  2418   bool doHeapRegion(HeapRegion* r) {
  2419     if (! r->continuesHumongous()) {
  2420       r->oop_iterate(_cl);
  2422     return false;
  2424 };
  2426 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  2427   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2428   _hrs->iterate(&blk);
  2429   if (do_perm) {
  2430     perm_gen()->oop_iterate(cl);
  2434 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  2435   IterateOopClosureRegionClosure blk(mr, cl);
  2436   _hrs->iterate(&blk);
  2437   if (do_perm) {
  2438     perm_gen()->oop_iterate(cl);
  2442 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2444 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2445   ObjectClosure* _cl;
  2446 public:
  2447   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2448   bool doHeapRegion(HeapRegion* r) {
  2449     if (! r->continuesHumongous()) {
  2450       r->object_iterate(_cl);
  2452     return false;
  2454 };
  2456 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  2457   IterateObjectClosureRegionClosure blk(cl);
  2458   _hrs->iterate(&blk);
  2459   if (do_perm) {
  2460     perm_gen()->object_iterate(cl);
  2464 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  2465   // FIXME: is this right?
  2466   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  2469 // Calls a SpaceClosure on a HeapRegion.
  2471 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2472   SpaceClosure* _cl;
  2473 public:
  2474   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2475   bool doHeapRegion(HeapRegion* r) {
  2476     _cl->do_space(r);
  2477     return false;
  2479 };
  2481 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2482   SpaceClosureRegionClosure blk(cl);
  2483   _hrs->iterate(&blk);
  2486 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  2487   _hrs->iterate(cl);
  2490 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  2491                                                HeapRegionClosure* cl) {
  2492   _hrs->iterate_from(r, cl);
  2495 void
  2496 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  2497   _hrs->iterate_from(idx, cl);
  2500 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
  2502 void
  2503 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2504                                                  int worker,
  2505                                                  jint claim_value) {
  2506   const size_t regions = n_regions();
  2507   const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  2508   // try to spread out the starting points of the workers
  2509   const size_t start_index = regions / worker_num * (size_t) worker;
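          // For example (illustration only): with 4 workers and 100 regions,
          // worker 0 starts at region 0, worker 1 at region 25, worker 2 at
          // region 50, and so on, wrapping around modulo the region count.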
  2511   // each worker will actually look at all regions
  2512   for (size_t count = 0; count < regions; ++count) {
  2513     const size_t index = (start_index + count) % regions;
  2514     assert(0 <= index && index < regions, "sanity");
  2515     HeapRegion* r = region_at(index);
  2516     // we'll ignore "continues humongous" regions (we'll process them
   2517     // when we come across their corresponding "starts humongous"
  2518     // region) and regions already claimed
  2519     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2520       continue;
  2522     // OK, try to claim it
  2523     if (r->claimHeapRegion(claim_value)) {
  2524       // success!
  2525       assert(!r->continuesHumongous(), "sanity");
  2526       if (r->startsHumongous()) {
  2527         // If the region is "starts humongous" we'll iterate over its
  2528         // "continues humongous" first; in fact we'll do them
   2529         // first. The order is important. In one case, calling the
  2530         // closure on the "starts humongous" region might de-allocate
  2531         // and clear all its "continues humongous" regions and, as a
  2532         // result, we might end up processing them twice. So, we'll do
  2533         // them first (notice: most closures will ignore them anyway) and
  2534         // then we'll do the "starts humongous" region.
  2535         for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  2536           HeapRegion* chr = region_at(ch_index);
  2538           // if the region has already been claimed or it's not
  2539           // "continues humongous" we're done
  2540           if (chr->claim_value() == claim_value ||
  2541               !chr->continuesHumongous()) {
  2542             break;
   2545           // No one should have claimed it directly. We can assert this,
   2546           // given that we claimed its "starts humongous" region.
  2547           assert(chr->claim_value() != claim_value, "sanity");
  2548           assert(chr->humongous_start_region() == r, "sanity");
  2550           if (chr->claimHeapRegion(claim_value)) {
   2551             // we should always be able to claim it; no one else should
  2552             // be trying to claim this region
  2554             bool res2 = cl->doHeapRegion(chr);
  2555             assert(!res2, "Should not abort");
  2557             // Right now, this holds (i.e., no closure that actually
  2558             // does something with "continues humongous" regions
  2559             // clears them). We might have to weaken it in the future,
  2560             // but let's leave these two asserts here for extra safety.
  2561             assert(chr->continuesHumongous(), "should still be the case");
  2562             assert(chr->humongous_start_region() == r, "sanity");
  2563           } else {
  2564             guarantee(false, "we should not reach here");
  2569       assert(!r->continuesHumongous(), "sanity");
  2570       bool res = cl->doHeapRegion(r);
  2571       assert(!res, "Should not abort");
  2576 class ResetClaimValuesClosure: public HeapRegionClosure {
  2577 public:
  2578   bool doHeapRegion(HeapRegion* r) {
  2579     r->set_claim_value(HeapRegion::InitialClaimValue);
  2580     return false;
  2582 };
  2584 void
  2585 G1CollectedHeap::reset_heap_region_claim_values() {
  2586   ResetClaimValuesClosure blk;
  2587   heap_region_iterate(&blk);
  2590 #ifdef ASSERT
  2591 // This checks whether all regions in the heap have the correct claim
   2592 // value. It also piggy-backs a check to ensure that the
  2593 // humongous_start_region() information on "continues humongous"
  2594 // regions is correct.
  2596 class CheckClaimValuesClosure : public HeapRegionClosure {
  2597 private:
  2598   jint _claim_value;
  2599   size_t _failures;
  2600   HeapRegion* _sh_region;
  2601 public:
  2602   CheckClaimValuesClosure(jint claim_value) :
  2603     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2604   bool doHeapRegion(HeapRegion* r) {
  2605     if (r->claim_value() != _claim_value) {
  2606       gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  2607                              "claim value = %d, should be %d",
  2608                              r->bottom(), r->end(), r->claim_value(),
  2609                              _claim_value);
  2610       ++_failures;
  2612     if (!r->isHumongous()) {
  2613       _sh_region = NULL;
  2614     } else if (r->startsHumongous()) {
  2615       _sh_region = r;
  2616     } else if (r->continuesHumongous()) {
  2617       if (r->humongous_start_region() != _sh_region) {
  2618         gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  2619                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2620                                r->bottom(), r->end(),
  2621                                r->humongous_start_region(),
  2622                                _sh_region);
  2623         ++_failures;
  2626     return false;
  2628   size_t failures() {
  2629     return _failures;
  2631 };
  2633 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2634   CheckClaimValuesClosure cl(claim_value);
  2635   heap_region_iterate(&cl);
  2636   return cl.failures() == 0;
  2638 #endif // ASSERT
  2640 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2641   HeapRegion* r = g1_policy()->collection_set();
  2642   while (r != NULL) {
  2643     HeapRegion* next = r->next_in_collection_set();
  2644     if (cl->doHeapRegion(r)) {
  2645       cl->incomplete();
  2646       return;
  2648     r = next;
  2652 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2653                                                   HeapRegionClosure *cl) {
  2654   if (r == NULL) {
  2655     // The CSet is empty so there's nothing to do.
  2656     return;
  2659   assert(r->in_collection_set(),
  2660          "Start region must be a member of the collection set.");
  2661   HeapRegion* cur = r;
  2662   while (cur != NULL) {
  2663     HeapRegion* next = cur->next_in_collection_set();
  2664     if (cl->doHeapRegion(cur) && false) {
  2665       cl->incomplete();
  2666       return;
  2668     cur = next;
  2670   cur = g1_policy()->collection_set();
  2671   while (cur != r) {
  2672     HeapRegion* next = cur->next_in_collection_set();
  2673     if (cl->doHeapRegion(cur) && false) {
  2674       cl->incomplete();
  2675       return;
  2677     cur = next;
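// Taken together, the two loops above visit the collection set as if it were
// circular: first from r to the tail of the list, then from the list head up
// to (but not including) r, so every CSet region is visited exactly once no
// matter where the iteration starts. (Note that the "&& false" in the
// conditions above currently disables the early-termination path.) A minimal
// sketch of an equivalent single wrap-around loop, illustrative only:
//
//   HeapRegion* cur = r;
//   do {
//     cl->doHeapRegion(cur);
//     cur = cur->next_in_collection_set();
//     if (cur == NULL) {
//       cur = g1_policy()->collection_set();  // wrap around to the head
//     }
//   } while (cur != r);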
  2681 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2682   return _hrs->length() > 0 ? _hrs->at(0) : NULL;
  2686 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2687   Space* res = heap_region_containing(addr);
  2688   if (res == NULL)
  2689     res = perm_gen()->space_containing(addr);
  2690   return res;
  2693 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2694   Space* sp = space_containing(addr);
  2695   if (sp != NULL) {
  2696     return sp->block_start(addr);
  2698   return NULL;
  2701 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2702   Space* sp = space_containing(addr);
  2703   assert(sp != NULL, "block_size of address outside of heap");
  2704   return sp->block_size(addr);
  2707 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2708   Space* sp = space_containing(addr);
  2709   return sp->block_is_obj(addr);
  2712 bool G1CollectedHeap::supports_tlab_allocation() const {
  2713   return true;
  2716 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2717   return HeapRegion::GrainBytes;
  2720 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2721   // Return the remaining space in the cur alloc region, but not less than
  2722   // the min TLAB size.
  2724   // Also, this value can be at most the humongous object threshold,
  2725   // since we can't allow TLABs to grow big enough to accommodate
  2726   // humongous objects.
  2728   // We need to store the cur alloc region locally, since it might change
  2729   // between when we test for NULL and when we use it later.
  2730   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  2731   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  2733   if (cur_alloc_space == NULL) {
  2734     return max_tlab_size;
  2735   } else {
  2736     return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
  2737                 max_tlab_size);
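// Worked example of the clamping above (all numbers illustrative only): with
// a humongous threshold of 1M, MinTLABSize of 4K and 600K free in the current
// alloc region, the result is MIN2(MAX2(600K, 4K), 1M) = 600K; with only 1K
// free it becomes MIN2(MAX2(1K, 4K), 1M) = 4K; and with 2M free the result is
// capped at the 1M humongous threshold.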
  2741 size_t G1CollectedHeap::large_typearray_limit() {
  2742   // FIXME
  2743   return HeapRegion::GrainBytes/HeapWordSize;
  2746 size_t G1CollectedHeap::max_capacity() const {
  2747   return g1_reserved_obj_bytes();
  2750 jlong G1CollectedHeap::millis_since_last_gc() {
  2751   // assert(false, "NYI");
  2752   return 0;
  2755 void G1CollectedHeap::prepare_for_verify() {
  2756   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2757     ensure_parsability(false);
  2759   g1_rem_set()->prepare_for_verify();
  2762 class VerifyLivenessOopClosure: public OopClosure {
  2763   G1CollectedHeap* g1h;
  2764 public:
  2765   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
  2766     g1h = _g1h;
  2768   void do_oop(narrowOop *p) { do_oop_work(p); }
  2769   void do_oop(      oop *p) { do_oop_work(p); }
  2771   template <class T> void do_oop_work(T *p) {
  2772     oop obj = oopDesc::load_decode_heap_oop(p);
  2773     guarantee(obj == NULL || !g1h->is_obj_dead(obj),
  2774               "Dead object referenced by a not dead object");
  2776 };
  2778 class VerifyObjsInRegionClosure: public ObjectClosure {
  2779 private:
  2780   G1CollectedHeap* _g1h;
  2781   size_t _live_bytes;
  2782   HeapRegion *_hr;
  2783   bool _use_prev_marking;
  2784 public:
  2785   // use_prev_marking == true  -> use "prev" marking information,
  2786   // use_prev_marking == false -> use "next" marking information
  2787   VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
  2788     : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
  2789     _g1h = G1CollectedHeap::heap();
  2791   void do_object(oop o) {
  2792     VerifyLivenessOopClosure isLive(_g1h);
  2793     assert(o != NULL, "Huh?");
  2794     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
  2795       o->oop_iterate(&isLive);
  2796       if (!_hr->obj_allocated_since_prev_marking(o)) {
  2797         size_t obj_size = o->size();    // Make sure we don't overflow
  2798         _live_bytes += (obj_size * HeapWordSize);
  2802   size_t live_bytes() { return _live_bytes; }
  2803 };
  2805 class PrintObjsInRegionClosure : public ObjectClosure {
  2806   HeapRegion *_hr;
  2807   G1CollectedHeap *_g1;
  2808 public:
  2809   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  2810     _g1 = G1CollectedHeap::heap();
  2811   };
  2813   void do_object(oop o) {
  2814     if (o != NULL) {
  2815       HeapWord *start = (HeapWord *) o;
  2816       size_t word_sz = o->size();
  2817       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  2818                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  2819                           (void*) o, word_sz,
  2820                           _g1->isMarkedPrev(o),
  2821                           _g1->isMarkedNext(o),
  2822                           _hr->obj_allocated_since_prev_marking(o));
  2823       HeapWord *end = start + word_sz;
  2824       HeapWord *cur;
  2825       int *val;
  2826       for (cur = start; cur < end; cur++) {
  2827         val = (int *) cur;
  2828         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  2832 };
  2834 class VerifyRegionClosure: public HeapRegionClosure {
  2835 private:
  2836   bool _allow_dirty;
  2837   bool _par;
  2838   bool _use_prev_marking;
  2839   bool _failures;
  2840 public:
  2841   // use_prev_marking == true  -> use "prev" marking information,
  2842   // use_prev_marking == false -> use "next" marking information
  2843   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
  2844     : _allow_dirty(allow_dirty),
  2845       _par(par),
  2846       _use_prev_marking(use_prev_marking),
  2847       _failures(false) {}
  2849   bool failures() {
  2850     return _failures;
  2853   bool doHeapRegion(HeapRegion* r) {
  2854     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  2855               "Should be unclaimed at verify points.");
  2856     if (!r->continuesHumongous()) {
  2857       bool failures = false;
  2858       r->verify(_allow_dirty, _use_prev_marking, &failures);
  2859       if (failures) {
  2860         _failures = true;
  2861       } else {
  2862         VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
  2863         r->object_iterate(&not_dead_yet_cl);
  2864         if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  2865           gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  2866                                  "max_live_bytes "SIZE_FORMAT" "
  2867                                  "< calculated "SIZE_FORMAT,
  2868                                  r->bottom(), r->end(),
  2869                                  r->max_live_bytes(),
  2870                                  not_dead_yet_cl.live_bytes());
  2871           _failures = true;
  2875     return false; // keep iterating over the regions even if we hit a failure
  2877 };
  2879 class VerifyRootsClosure: public OopsInGenClosure {
  2880 private:
  2881   G1CollectedHeap* _g1h;
  2882   bool             _use_prev_marking;
  2883   bool             _failures;
  2884 public:
  2885   // use_prev_marking == true  -> use "prev" marking information,
  2886   // use_prev_marking == false -> use "next" marking information
  2887   VerifyRootsClosure(bool use_prev_marking) :
  2888     _g1h(G1CollectedHeap::heap()),
  2889     _use_prev_marking(use_prev_marking),
  2890     _failures(false) { }
  2892   bool failures() { return _failures; }
  2894   template <class T> void do_oop_nv(T* p) {
  2895     T heap_oop = oopDesc::load_heap_oop(p);
  2896     if (!oopDesc::is_null(heap_oop)) {
  2897       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2898       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
  2899         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  2900                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  2901         obj->print_on(gclog_or_tty);
  2902         _failures = true;
  2907   void do_oop(oop* p)       { do_oop_nv(p); }
  2908   void do_oop(narrowOop* p) { do_oop_nv(p); }
  2909 };
  2911 // This is the task used for parallel heap verification.
  2913 class G1ParVerifyTask: public AbstractGangTask {
  2914 private:
  2915   G1CollectedHeap* _g1h;
  2916   bool _allow_dirty;
  2917   bool _use_prev_marking;
  2918   bool _failures;
  2920 public:
  2921   // use_prev_marking == true  -> use "prev" marking information,
  2922   // use_prev_marking == false -> use "next" marking information
  2923   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
  2924                   bool use_prev_marking) :
  2925     AbstractGangTask("Parallel verify task"),
  2926     _g1h(g1h),
  2927     _allow_dirty(allow_dirty),
  2928     _use_prev_marking(use_prev_marking),
  2929     _failures(false) { }
  2931   bool failures() {
  2932     return _failures;
  2935   void work(int worker_i) {
  2936     HandleMark hm;
  2937     VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
  2938     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
  2939                                           HeapRegion::ParVerifyClaimValue);
  2940     if (blk.failures()) {
  2941       _failures = true;
  2944 };
  2946 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  2947   verify(allow_dirty, silent, /* use_prev_marking */ true);
  2950 void G1CollectedHeap::verify(bool allow_dirty,
  2951                              bool silent,
  2952                              bool use_prev_marking) {
  2953   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2954     if (!silent) { gclog_or_tty->print("roots "); }
  2955     VerifyRootsClosure rootsCl(use_prev_marking);
  2956     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
  2957     process_strong_roots(true,  // activate StrongRootsScope
  2958                          false,
  2959                          SharedHeap::SO_AllClasses,
  2960                          &rootsCl,
  2961                          &blobsCl,
  2962                          &rootsCl);
  2963     bool failures = rootsCl.failures();
  2964     rem_set()->invalidate(perm_gen()->used_region(), false);
  2965     if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
  2966     verify_region_sets();
  2967     if (!silent) { gclog_or_tty->print("HeapRegions "); }
  2968     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2969       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2970              "sanity check");
  2972       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2973       int n_workers = workers()->total_workers();
  2974       set_par_threads(n_workers);
  2975       workers()->run_task(&task);
  2976       set_par_threads(0);
  2977       if (task.failures()) {
  2978         failures = true;
  2981       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  2982              "sanity check");
  2984       reset_heap_region_claim_values();
  2986       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2987              "sanity check");
  2988     } else {
  2989       VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
  2990       _hrs->iterate(&blk);
  2991       if (blk.failures()) {
  2992         failures = true;
  2995     if (!silent) gclog_or_tty->print("RemSet ");
  2996     rem_set()->verify();
  2998     if (failures) {
  2999       gclog_or_tty->print_cr("Heap:");
  3000       print_on(gclog_or_tty, true /* extended */);
  3001       gclog_or_tty->print_cr("");
  3002 #ifndef PRODUCT
  3003       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  3004         concurrent_mark()->print_reachable("at-verification-failure",
  3005                                            use_prev_marking, false /* all */);
  3007 #endif
  3008       gclog_or_tty->flush();
  3010     guarantee(!failures, "there should not have been any failures");
  3011   } else {
  3012     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  3016 class PrintRegionClosure: public HeapRegionClosure {
  3017   outputStream* _st;
  3018 public:
  3019   PrintRegionClosure(outputStream* st) : _st(st) {}
  3020   bool doHeapRegion(HeapRegion* r) {
  3021     r->print_on(_st);
  3022     return false;
  3024 };
  3026 void G1CollectedHeap::print() const { print_on(tty); }
  3028 void G1CollectedHeap::print_on(outputStream* st) const {
  3029   print_on(st, PrintHeapAtGCExtended);
  3032 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
  3033   st->print(" %-20s", "garbage-first heap");
  3034   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  3035             capacity()/K, used_unlocked()/K);
  3036   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  3037             _g1_storage.low_boundary(),
  3038             _g1_storage.high(),
  3039             _g1_storage.high_boundary());
  3040   st->cr();
  3041   st->print("  region size " SIZE_FORMAT "K, ",
  3042             HeapRegion::GrainBytes/K);
  3043   size_t young_regions = _young_list->length();
  3044   st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  3045             young_regions, young_regions * HeapRegion::GrainBytes / K);
  3046   size_t survivor_regions = g1_policy()->recorded_survivor_regions();
  3047   st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
  3048             survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
  3049   st->cr();
  3050   perm()->as_gen()->print_on(st);
  3051   if (extended) {
  3052     st->cr();
  3053     print_on_extended(st);
  3057 void G1CollectedHeap::print_on_extended(outputStream* st) const {
  3058   PrintRegionClosure blk(st);
  3059   _hrs->iterate(&blk);
  3062 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  3063   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3064     workers()->print_worker_threads_on(st);
  3066   _cmThread->print_on(st);
  3067   st->cr();
  3068   _cm->print_worker_threads_on(st);
  3069   _cg1r->print_worker_threads_on(st);
  3070   st->cr();
  3073 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  3074   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3075     workers()->threads_do(tc);
  3077   tc->do_thread(_cmThread);
  3078   _cg1r->threads_do(tc);
  3081 void G1CollectedHeap::print_tracing_info() const {
  3082   // We'll overload this to mean "trace GC pause statistics."
  3083   if (TraceGen0Time || TraceGen1Time) {
  3084     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  3085     // to that.
  3086     g1_policy()->print_tracing_info();
  3088   if (G1SummarizeRSetStats) {
  3089     g1_rem_set()->print_summary_info();
  3091   if (G1SummarizeConcMark) {
  3092     concurrent_mark()->print_summary_info();
  3094   g1_policy()->print_yg_surv_rate_info();
  3095   SpecializationStats::print();
  3098 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  3099   HeapRegion* hr = heap_region_containing(addr);
  3100   if (hr == NULL) {
  3101     return 0;
  3102   } else {
  3103     return 1;
  3107 G1CollectedHeap* G1CollectedHeap::heap() {
  3108   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  3109          "not a garbage-first heap");
  3110   return _g1h;
  3113 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  3114   // always_do_update_barrier = false;
  3115   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  3116   // Call allocation profiler
  3117   AllocationProfiler::iterate_since_last_gc();
  3118   // Fill TLAB's and such
  3119   ensure_parsability(true);
  3122 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  3123   // FIXME: what is this about?
  3124   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  3125   // is set.
  3126   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  3127                         "derived pointer present"));
  3128   // always_do_update_barrier = true;
  3131 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  3132                                                unsigned int gc_count_before,
  3133                                                bool* succeeded) {
  3134   assert_heap_not_locked_and_not_at_safepoint();
  3135   g1_policy()->record_stop_world_start();
  3136   VM_G1IncCollectionPause op(gc_count_before,
  3137                              word_size,
  3138                              false, /* should_initiate_conc_mark */
  3139                              g1_policy()->max_pause_time_ms(),
  3140                              GCCause::_g1_inc_collection_pause);
  3141   VMThread::execute(&op);
  3143   HeapWord* result = op.result();
  3144   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  3145   assert(result == NULL || ret_succeeded,
  3146          "the result should be NULL if the VM did not succeed");
  3147   *succeeded = ret_succeeded;
  3149   assert_heap_not_locked();
  3150   return result;
  3153 void
  3154 G1CollectedHeap::doConcurrentMark() {
  3155   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  3156   if (!_cmThread->in_progress()) {
  3157     _cmThread->set_started();
  3158     CGC_lock->notify();
  3162 class VerifyMarkedObjsClosure: public ObjectClosure {
  3163     G1CollectedHeap* _g1h;
  3164     public:
  3165     VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  3166     void do_object(oop obj) {
  3167       assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
  3168              "markandsweep mark should agree with concurrent deadness");
  3170 };
  3172 void
  3173 G1CollectedHeap::checkConcurrentMark() {
  3174     VerifyMarkedObjsClosure verifycl(this);
  3175     //    MutexLockerEx x(getMarkBitMapLock(),
  3176     //              Mutex::_no_safepoint_check_flag);
  3177     object_iterate(&verifycl, false);
  3180 void G1CollectedHeap::do_sync_mark() {
  3181   _cm->checkpointRootsInitial();
  3182   _cm->markFromRoots();
  3183   _cm->checkpointRootsFinal(false);
  3186 // <NEW PREDICTION>
  3188 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
  3189                                                        bool young) {
  3190   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
  3193 void G1CollectedHeap::check_if_region_is_too_expensive(double
  3194                                                            predicted_time_ms) {
  3195   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
  3198 size_t G1CollectedHeap::pending_card_num() {
  3199   size_t extra_cards = 0;
  3200   JavaThread *curr = Threads::first();
  3201   while (curr != NULL) {
  3202     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3203     extra_cards += dcq.size();
  3204     curr = curr->next();
  3206   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3207   size_t buffer_size = dcqs.buffer_size();
  3208   size_t buffer_num = dcqs.completed_buffers_num();
  3209   return buffer_size * buffer_num + extra_cards;
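// In other words: pending cards = completed_buffers_num() * buffer_size()
// plus the cards still sitting in each Java thread's partially-filled local
// queue. A worked example with illustrative numbers: 8 completed buffers of
// 256 cards each, plus three mutator threads holding 10, 0 and 70 cards
// locally, gives 8 * 256 + 80 = 2128 pending cards.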
  3212 size_t G1CollectedHeap::max_pending_card_num() {
  3213   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3214   size_t buffer_size = dcqs.buffer_size();
  3215   size_t buffer_num  = dcqs.completed_buffers_num();
  3216   int thread_num  = Threads::number_of_threads();
  3217   return (buffer_num + thread_num) * buffer_size;
  3220 size_t G1CollectedHeap::cards_scanned() {
  3221   return g1_rem_set()->cardsScanned();
  3224 void
  3225 G1CollectedHeap::setup_surviving_young_words() {
  3226   guarantee( _surviving_young_words == NULL, "pre-condition" );
  3227   size_t array_length = g1_policy()->young_cset_length();
  3228   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  3229   if (_surviving_young_words == NULL) {
  3230     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  3231                           "Not enough space for young surv words summary.");
  3233   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  3234 #ifdef ASSERT
  3235   for (size_t i = 0;  i < array_length; ++i) {
  3236     assert( _surviving_young_words[i] == 0, "memset above" );
  3238 #endif // ASSERT
  3241 void
  3242 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3243   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3244   size_t array_length = g1_policy()->young_cset_length();
  3245   for (size_t i = 0; i < array_length; ++i)
  3246     _surviving_young_words[i] += surv_young_words[i];
  3249 void
  3250 G1CollectedHeap::cleanup_surviving_young_words() {
  3251   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3252   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
  3253   _surviving_young_words = NULL;
  3256 // </NEW PREDICTION>
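// The three methods above form the lifecycle of the per-region surviving
// young words array over an evacuation pause. A minimal sketch of the
// expected calling pattern (the worker-local array name is hypothetical;
// each GC worker accumulates counts privately and publishes them under
// ParGCRareEvent_lock via update_surviving_young_words()):
//
//   setup_surviving_young_words();                      // before evacuation
//   // in each worker, at the end of its work:
//   //   update_surviving_young_words(local_surv_young_words);
//   cleanup_surviving_young_words();                    // after evacuation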
  3258 struct PrepareForRSScanningClosure : public HeapRegionClosure {
  3259   bool doHeapRegion(HeapRegion *r) {
  3260     r->rem_set()->set_iter_claimed(0);
  3261     return false;
  3263 };
  3265 #if TASKQUEUE_STATS
  3266 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3267   st->print_raw_cr("GC Task Stats");
  3268   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3269   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3272 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3273   print_taskqueue_stats_hdr(st);
  3275   TaskQueueStats totals;
  3276   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3277   for (int i = 0; i < n; ++i) {
  3278     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3279     totals += task_queue(i)->stats;
  3281   st->print_raw("tot "); totals.print(st); st->cr();
  3283   DEBUG_ONLY(totals.verify());
  3286 void G1CollectedHeap::reset_taskqueue_stats() {
  3287   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3288   for (int i = 0; i < n; ++i) {
  3289     task_queue(i)->stats.reset();
  3292 #endif // TASKQUEUE_STATS
  3294 bool
  3295 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3296   assert_at_safepoint(true /* should_be_vm_thread */);
  3297   guarantee(!is_gc_active(), "collection is not reentrant");
  3299   if (GC_locker::check_active_before_gc()) {
  3300     return false;
  3303   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3304   ResourceMark rm;
  3306   if (PrintHeapAtGC) {
  3307     Universe::print_heap_before_gc();
  3310   verify_region_sets_optional();
  3313     // This call will decide whether this pause is an initial-mark
  3314     // pause. If it is, during_initial_mark_pause() will return true
  3315     // for the duration of this pause.
  3316     g1_policy()->decide_on_conc_mark_initiation();
  3318     char verbose_str[128];
  3319     sprintf(verbose_str, "GC pause ");
  3320     if (g1_policy()->in_young_gc_mode()) {
  3321       if (g1_policy()->full_young_gcs())
  3322         strcat(verbose_str, "(young)");
  3323       else
  3324         strcat(verbose_str, "(partial)");
  3326     if (g1_policy()->during_initial_mark_pause()) {
  3327       strcat(verbose_str, " (initial-mark)");
  3328       // We are about to start a marking cycle, so we increment the
  3329       // full collection counter.
  3330       increment_total_full_collections();
  3333     // if PrintGCDetails is on, we'll print long statistics information
  3334     // in the collector policy code, so let's not print this as the output
  3335     // is messy if we do.
  3336     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  3337     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3338     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  3340     TraceMemoryManagerStats tms(false /* fullGC */);
  3342     // If there are any free regions available on the secondary_free_list
  3343     // make sure we append them to the free_list. However, we don't
  3344     // have to wait for the rest of the cleanup operation to
  3345     // finish. If it's still going on that's OK. If we run out of
  3346     // regions, the region allocation code will check the
  3347     // secondary_free_list and potentially wait if more free regions
  3348     // are coming (see new_region_try_secondary_free_list()).
  3349     if (!G1StressConcRegionFreeing) {
  3350       append_secondary_free_list_if_not_empty();
  3353     increment_gc_time_stamp();
  3355     if (g1_policy()->in_young_gc_mode()) {
  3356       assert(check_young_list_well_formed(),
  3357              "young list should be well formed");
  3360     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3361       IsGCActiveMark x;
  3363       gc_prologue(false);
  3364       increment_total_collections(false /* full gc */);
  3366 #if G1_REM_SET_LOGGING
  3367       gclog_or_tty->print_cr("\nJust chose CS, heap:");
  3368       print();
  3369 #endif
  3371       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  3372         HandleMark hm;  // Discard invalid handles created during verification
  3373         prepare_for_verify();
  3374         gclog_or_tty->print(" VerifyBeforeGC:");
  3375         Universe::verify(false);
  3378       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3380       // Please see comment in G1CollectedHeap::ref_processing_init()
  3381       // to see how reference processing currently works in G1.
  3382       //
  3383       // We want to turn off ref discovery, if necessary, and turn it back
  3384       // on again later if we do. XXX Dubious: why is discovery disabled?
  3385       bool was_enabled = ref_processor()->discovery_enabled();
  3386       if (was_enabled) ref_processor()->disable_discovery();
  3388       // Forget the current alloc region (we might even choose it to be part
  3389       // of the collection set!).
  3390       abandon_cur_alloc_region();
  3392       // The start time recorded below deliberately excludes the time spent
  3393       // in the possible verification above.
  3394       double start_time_sec = os::elapsedTime();
  3395       size_t start_used_bytes = used();
  3397 #if YOUNG_LIST_VERBOSE
  3398       gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  3399       _young_list->print();
  3400       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3401 #endif // YOUNG_LIST_VERBOSE
  3403       g1_policy()->record_collection_pause_start(start_time_sec,
  3404                                                  start_used_bytes);
  3406 #if YOUNG_LIST_VERBOSE
  3407       gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  3408       _young_list->print();
  3409 #endif // YOUNG_LIST_VERBOSE
  3411       if (g1_policy()->during_initial_mark_pause()) {
  3412         concurrent_mark()->checkpointRootsInitialPre();
  3414       save_marks();
  3416       // We must do this before any possible evacuation that should propagate
  3417       // marks.
  3418       if (mark_in_progress()) {
  3419         double start_time_sec = os::elapsedTime();
  3421         _cm->drainAllSATBBuffers();
  3422         double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  3423         g1_policy()->record_satb_drain_time(finish_mark_ms);
  3425       // Record the number of elements currently on the mark stack, so we
  3426       // only iterate over these.  (Since evacuation may add to the mark
  3427       // stack, doing more exposes race conditions.)  If no mark is in
  3428       // progress, this will be zero.
  3429       _cm->set_oops_do_bound();
  3431       if (mark_in_progress())
  3432         concurrent_mark()->newCSet();
  3434 #if YOUNG_LIST_VERBOSE
  3435       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  3436       _young_list->print();
  3437       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3438 #endif // YOUNG_LIST_VERBOSE
  3440       g1_policy()->choose_collection_set(target_pause_time_ms);
  3442       // Nothing to do if we were unable to choose a collection set.
  3443 #if G1_REM_SET_LOGGING
  3444       gclog_or_tty->print_cr("\nAfter pause, heap:");
  3445       print();
  3446 #endif
  3447       PrepareForRSScanningClosure prepare_for_rs_scan;
  3448       collection_set_iterate(&prepare_for_rs_scan);
  3450       setup_surviving_young_words();
  3452       // Set up the gc allocation regions.
  3453       get_gc_alloc_regions();
  3455       // Actually do the work...
  3456       evacuate_collection_set();
  3458       free_collection_set(g1_policy()->collection_set());
  3459       g1_policy()->clear_collection_set();
  3461       cleanup_surviving_young_words();
  3463       // Start a new incremental collection set for the next pause.
  3464       g1_policy()->start_incremental_cset_building();
  3466       // Clear the _cset_fast_test bitmap in anticipation of adding
  3467       // regions to the incremental collection set for the next
  3468       // evacuation pause.
  3469       clear_cset_fast_test();
  3471       if (g1_policy()->in_young_gc_mode()) {
  3472         _young_list->reset_sampled_info();
  3474         // Don't check the whole heap at this point as the
  3475         // GC alloc regions from this pause have been tagged
  3476         // as survivors and moved on to the survivor list.
  3477         // Survivor regions will fail the !is_young() check.
  3478         assert(check_young_list_empty(false /* check_heap */),
  3479                "young list should be empty");
  3481 #if YOUNG_LIST_VERBOSE
  3482         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  3483         _young_list->print();
  3484 #endif // YOUNG_LIST_VERBOSE
  3486         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  3487                                           _young_list->first_survivor_region(),
  3488                                           _young_list->last_survivor_region());
  3490         _young_list->reset_auxilary_lists();
  3493       if (evacuation_failed()) {
  3494         _summary_bytes_used = recalculate_used();
  3495       } else {
  3496         // The "used" of the collection set regions has already been subtracted
  3497         // when the regions were freed.  Add in the bytes evacuated.
  3498         _summary_bytes_used += g1_policy()->bytes_in_to_space();
  3501       if (g1_policy()->in_young_gc_mode() &&
  3502           g1_policy()->during_initial_mark_pause()) {
  3503         concurrent_mark()->checkpointRootsInitialPost();
  3504         set_marking_started();
  3505         // CAUTION: after the doConcurrentMark() call below,
  3506         // the concurrent marking thread(s) could be running
  3507         // concurrently with us. Make sure that anything after
  3508         // this point does not assume that we are the only GC thread
  3509         // running. Note: of course, the actual marking work will
  3510         // not start until the safepoint itself is released in
  3511         // ConcurrentGCThread::safepoint_desynchronize().
  3512         doConcurrentMark();
  3515 #if YOUNG_LIST_VERBOSE
  3516       gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  3517       _young_list->print();
  3518       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3519 #endif // YOUNG_LIST_VERBOSE
  3521       double end_time_sec = os::elapsedTime();
  3522       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  3523       g1_policy()->record_pause_time_ms(pause_time_ms);
  3524       g1_policy()->record_collection_pause_end();
  3526       MemoryService::track_memory_usage();
  3528       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  3529         HandleMark hm;  // Discard invalid handles created during verification
  3530         gclog_or_tty->print(" VerifyAfterGC:");
  3531         prepare_for_verify();
  3532         Universe::verify(false);
  3535       if (was_enabled) ref_processor()->enable_discovery();
  3538         size_t expand_bytes = g1_policy()->expansion_amount();
  3539         if (expand_bytes > 0) {
  3540           size_t bytes_before = capacity();
  3541           expand(expand_bytes);
  3545       if (mark_in_progress()) {
  3546         concurrent_mark()->update_g1_committed();
  3549 #ifdef TRACESPINNING
  3550       ParallelTaskTerminator::print_termination_counts();
  3551 #endif
  3553       gc_epilogue(false);
  3556     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  3557       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  3558       print_tracing_info();
  3559       vm_exit(-1);
  3563   verify_region_sets_optional();
  3565   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  3566   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  3568   if (PrintHeapAtGC) {
  3569     Universe::print_heap_after_gc();
  3571   if (G1SummarizeRSetStats &&
  3572       (G1SummarizeRSetStatsPeriod > 0) &&
  3573       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  3574     g1_rem_set()->print_summary_info();
  3577   return true;
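// For reference, the evacuation pause above boils down to the following
// sequence (a condensed sketch of the code above, not additional behavior):
//
//   g1_policy()->decide_on_conc_mark_initiation();
//   abandon_cur_alloc_region();
//   g1_policy()->record_collection_pause_start(start_time_sec, start_used_bytes);
//   g1_policy()->choose_collection_set(target_pause_time_ms);
//   setup_surviving_young_words();
//   get_gc_alloc_regions();
//   evacuate_collection_set();
//   free_collection_set(g1_policy()->collection_set());
//   cleanup_surviving_young_words();
//   g1_policy()->record_collection_pause_end();
//   doConcurrentMark();   // only on an initial-mark pause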
  3580 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  3582   size_t gclab_word_size;
  3583   switch (purpose) {
  3584     case GCAllocForSurvived:
  3585       gclab_word_size = YoungPLABSize;
  3586       break;
  3587     case GCAllocForTenured:
  3588       gclab_word_size = OldPLABSize;
  3589       break;
  3590     default:
  3591       assert(false, "unknown GCAllocPurpose");
  3592       gclab_word_size = OldPLABSize;
  3593       break;
  3595   return gclab_word_size;
  3599 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  3600   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  3601   // make sure we don't call set_gc_alloc_region() multiple times on
  3602   // the same region
  3603   assert(r == NULL || !r->is_gc_alloc_region(),
  3604          "shouldn't already be a GC alloc region");
  3605   assert(r == NULL || !r->isHumongous(),
  3606          "humongous regions shouldn't be used as GC alloc regions");
  3608   HeapWord* original_top = NULL;
  3609   if (r != NULL)
  3610     original_top = r->top();
  3612   // We will want to record the used space in r as being there before GC.
  3613   // Once we install it as a GC alloc region it's eligible for allocation.
  3614   // So record it now and use it later.
  3615   size_t r_used = 0;
  3616   if (r != NULL) {
  3617     r_used = r->used();
  3619     if (G1CollectedHeap::use_parallel_gc_threads()) {
  3620       // need to take the lock to guard against two threads calling
  3621       // get_gc_alloc_region concurrently (very unlikely but...)
  3622       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3623       r->save_marks();
  3626   HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  3627   _gc_alloc_regions[purpose] = r;
  3628   if (old_alloc_region != NULL) {
  3629     // Replace aliases too.
  3630     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3631       if (_gc_alloc_regions[ap] == old_alloc_region) {
  3632         _gc_alloc_regions[ap] = r;
  3636   if (r != NULL) {
  3637     push_gc_alloc_region(r);
  3638     if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
  3639       // We are using a region as a GC alloc region after it has been used
  3640       // as a mutator allocation region during the current marking cycle.
  3641       // The mutator-allocated objects are currently implicitly marked, but
  3642       // when we move hr->next_top_at_mark_start() forward at the end
  3643       // of the GC pause, they won't be.  We therefore mark all objects in
  3644       // the "gap".  We do this object-by-object, since marking densely
  3645       // does not currently work right with marking bitmap iteration.  This
  3646       // means we rely on TLAB filling at the start of pauses, and no
  3647       // "resuscitation" of filled TLAB's.  If we want to do this, we need
  3648       // to fix the marking bitmap iteration.
  3649       HeapWord* curhw = r->next_top_at_mark_start();
  3650       HeapWord* t = original_top;
  3652       while (curhw < t) {
  3653         oop cur = (oop)curhw;
  3654         // We'll assume parallel for generality.  This is rare code.
  3655         concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
  3656         curhw = curhw + cur->size();
  3658       assert(curhw == t, "Should have parsed correctly.");
  3660     if (G1PolicyVerbose > 1) {
  3661       gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
  3662                           "for survivors:", r->bottom(), original_top, r->end());
  3663       r->print();
  3665     g1_policy()->record_before_bytes(r_used);
  3669 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  3670   assert(Thread::current()->is_VM_thread() ||
  3671          FreeList_lock->owned_by_self(), "Precondition");
  3672   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  3673          "Precondition.");
  3674   hr->set_is_gc_alloc_region(true);
  3675   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  3676   _gc_alloc_region_list = hr;
  3679 #ifdef G1_DEBUG
  3680 class FindGCAllocRegion: public HeapRegionClosure {
  3681 public:
  3682   bool doHeapRegion(HeapRegion* r) {
  3683     if (r->is_gc_alloc_region()) {
  3684       gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
  3685                              r->hrs_index(), r->bottom());
  3687     return false;
  3689 };
  3690 #endif // G1_DEBUG
  3692 void G1CollectedHeap::forget_alloc_region_list() {
  3693   assert_at_safepoint(true /* should_be_vm_thread */);
  3694   while (_gc_alloc_region_list != NULL) {
  3695     HeapRegion* r = _gc_alloc_region_list;
  3696     assert(r->is_gc_alloc_region(), "Invariant.");
  3697     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  3698     // newly allocated data in order to be able to apply deferred updates
  3699     // before the GC is done for verification purposes (i.e. to allow
  3700     // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
  3701     // collection.
  3702     r->ContiguousSpace::set_saved_mark();
  3703     _gc_alloc_region_list = r->next_gc_alloc_region();
  3704     r->set_next_gc_alloc_region(NULL);
  3705     r->set_is_gc_alloc_region(false);
  3706     if (r->is_survivor()) {
  3707       if (r->is_empty()) {
  3708         r->set_not_young();
  3709       } else {
  3710         _young_list->add_survivor_region(r);
  3714 #ifdef G1_DEBUG
  3715   FindGCAllocRegion fa;
  3716   heap_region_iterate(&fa);
  3717 #endif // G1_DEBUG
  3721 bool G1CollectedHeap::check_gc_alloc_regions() {
  3722   // TODO: allocation regions check
  3723   return true;
  3726 void G1CollectedHeap::get_gc_alloc_regions() {
  3727   // First, let's check that the GC alloc region list is empty (it should be)
  3728   assert(_gc_alloc_region_list == NULL, "invariant");
  3730   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3731     assert(_gc_alloc_regions[ap] == NULL, "invariant");
  3732     assert(_gc_alloc_region_counts[ap] == 0, "invariant");
  3734     // Create new GC alloc regions.
  3735     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
  3736     _retained_gc_alloc_regions[ap] = NULL;
  3738     if (alloc_region != NULL) {
  3739       assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
  3741       // let's make sure that the GC alloc region is not tagged as such
  3742       // outside a GC operation
  3743       assert(!alloc_region->is_gc_alloc_region(), "sanity");
  3745       if (alloc_region->in_collection_set() ||
  3746           alloc_region->top() == alloc_region->end() ||
  3747           alloc_region->top() == alloc_region->bottom() ||
  3748           alloc_region->isHumongous()) {
  3749         // we will discard the current GC alloc region if
  3750         // * it's in the collection set (it can happen!),
  3751         // * it's already full (no point in using it),
  3752         // * it's empty (this means that it was emptied during
  3753         // a cleanup and it should be on the free list now), or
  3754         // * it's humongous (this means that it was emptied
  3755         // during a cleanup and was added to the free list, but
  3756         // has been subsequently used to allocate a humongous
  3757         // object that may be less than the region size).
  3759         alloc_region = NULL;
  3763     if (alloc_region == NULL) {
  3764       // we will get a new GC alloc region
  3765       alloc_region = new_gc_alloc_region(ap, 0);
  3766     } else {
  3767       // the region was retained from the last collection
  3768       ++_gc_alloc_region_counts[ap];
  3769       if (G1PrintHeapRegions) {
  3770         gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
  3771                                "top "PTR_FORMAT,
  3772                                alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
  3776     if (alloc_region != NULL) {
  3777       assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
  3778       set_gc_alloc_region(ap, alloc_region);
  3781     assert(_gc_alloc_regions[ap] == NULL ||
  3782            _gc_alloc_regions[ap]->is_gc_alloc_region(),
  3783            "the GC alloc region should be tagged as such");
  3784     assert(_gc_alloc_regions[ap] == NULL ||
  3785            _gc_alloc_regions[ap] == _gc_alloc_region_list,
  3786            "the GC alloc region should be the same as the GC alloc list head");
  3788   // Set alternative regions for allocation purposes that have reached
  3789   // their limit.
  3790   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3791     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
  3792     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
  3793       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
  3796   assert(check_gc_alloc_regions(), "alloc regions messed up");
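// The retain-or-discard decision above can be read as a single predicate: a
// GC alloc region retained from the previous pause is reused only if it is
// outside the collection set, not already full, not empty and not humongous.
// A minimal sketch of that predicate (helper name is hypothetical):
//
//   static bool can_reuse_retained_gc_alloc_region(HeapRegion* r) {
//     return !r->in_collection_set() &&
//            r->top() != r->end()    &&   // not already full
//            r->top() != r->bottom() &&   // not empty
//            !r->isHumongous();
//   }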
  3799 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  3800   // We keep a separate list of all regions that have been alloc regions in
  3801   // the current collection pause. Forget that now. This method will
  3802   // untag the GC alloc regions and tear down the GC alloc region
  3803   // list. It's desirable that no regions are tagged as GC alloc
  3804   // outside GCs.
  3806   forget_alloc_region_list();
  3808   // The current alloc regions contain objs that have survived
  3809   // collection. Make them no longer GC alloc regions.
  3810   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3811     HeapRegion* r = _gc_alloc_regions[ap];
  3812     _retained_gc_alloc_regions[ap] = NULL;
  3813     _gc_alloc_region_counts[ap] = 0;
  3815     if (r != NULL) {
  3816       // we retain nothing on _gc_alloc_regions between GCs
  3817       set_gc_alloc_region(ap, NULL);
  3819       if (r->is_empty()) {
  3820         // We didn't actually allocate anything in it; let's just put
  3821         // it back on the free list.
  3822         _free_list.add_as_tail(r);
  3823       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3824         // retain it so that we can use it at the beginning of the next GC
  3825         _retained_gc_alloc_regions[ap] = r;
  3831 #ifndef PRODUCT
  3832 // Useful for debugging
  3834 void G1CollectedHeap::print_gc_alloc_regions() {
  3835   gclog_or_tty->print_cr("GC alloc regions");
  3836   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3837     HeapRegion* r = _gc_alloc_regions[ap];
  3838     if (r == NULL) {
  3839       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
  3840     } else {
  3841       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
  3842                              ap, r->bottom(), r->used());
  3846 #endif // PRODUCT
  3848 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  3849   _drain_in_progress = false;
  3850   set_evac_failure_closure(cl);
  3851   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3854 void G1CollectedHeap::finalize_for_evac_failure() {
  3855   assert(_evac_failure_scan_stack != NULL &&
  3856          _evac_failure_scan_stack->length() == 0,
  3857          "Postcondition");
  3858   assert(!_drain_in_progress, "Postcondition");
  3859   delete _evac_failure_scan_stack;
  3860   _evac_failure_scan_stack = NULL;
  3865 // *** Sequential G1 Evacuation
  3867 class G1IsAliveClosure: public BoolObjectClosure {
  3868   G1CollectedHeap* _g1;
  3869 public:
  3870   G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3871   void do_object(oop p) { assert(false, "Do not call."); }
  3872   bool do_object_b(oop p) {
  3873     // It is reachable if it is outside the collection set, or is inside
  3874     // and forwarded.
  3876 #ifdef G1_DEBUG
  3877     gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
  3878                            (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
  3879                            !_g1->obj_in_cs(p) || p->is_forwarded());
  3880 #endif // G1_DEBUG
  3882     return !_g1->obj_in_cs(p) || p->is_forwarded();
  3884 };
  3886 class G1KeepAliveClosure: public OopClosure {
  3887   G1CollectedHeap* _g1;
  3888 public:
  3889   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3890   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  3891   void do_oop(      oop* p) {
  3892     oop obj = *p;
  3893 #ifdef G1_DEBUG
  3894     if (PrintGC && Verbose) {
  3895       gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
  3896                              p, (void*) obj, (void*) *p);
  3898 #endif // G1_DEBUG
  3900     if (_g1->obj_in_cs(obj)) {
  3901       assert( obj->is_forwarded(), "invariant" );
  3902       *p = obj->forwardee();
  3903 #ifdef G1_DEBUG
  3904       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
  3905                              (void*) obj, (void*) *p);
  3906 #endif // G1_DEBUG
  3909 };
  3911 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  3912 private:
  3913   G1CollectedHeap* _g1;
  3914   DirtyCardQueue *_dcq;
  3915   CardTableModRefBS* _ct_bs;
  3917 public:
  3918   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
  3919     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
  3921   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3922   virtual void do_oop(      oop* p) { do_oop_work(p); }
  3923   template <class T> void do_oop_work(T* p) {
  3924     assert(_from->is_in_reserved(p), "paranoia");
  3925     if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
  3926         !_from->is_survivor()) {
  3927       size_t card_index = _ct_bs->index_for(p);
  3928       if (_ct_bs->mark_card_deferred(card_index)) {
  3929         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
  3933 };
  3935 class RemoveSelfPointerClosure: public ObjectClosure {
  3936 private:
  3937   G1CollectedHeap* _g1;
  3938   ConcurrentMark* _cm;
  3939   HeapRegion* _hr;
  3940   size_t _prev_marked_bytes;
  3941   size_t _next_marked_bytes;
  3942   OopsInHeapRegionClosure *_cl;
  3943 public:
  3944   RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
  3945                            OopsInHeapRegionClosure* cl) :
  3946     _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
  3947     _next_marked_bytes(0), _cl(cl) {}
  3949   size_t prev_marked_bytes() { return _prev_marked_bytes; }
  3950   size_t next_marked_bytes() { return _next_marked_bytes; }
  3952   // <original comment>
  3953   // The original idea here was to coalesce evacuated and dead objects.
  3954   // However that caused complications with the block offset table (BOT).
  3955   // In particular if there were two TLABs, one of them partially refined.
  3956   // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  3957   // The BOT entries of the unrefined part of TLAB_2 point to the start
  3958   // of TLAB_2. If the last object of the TLAB_1 and the first object
  3959   // of TLAB_2 are coalesced, then the cards of the unrefined part
  3960   // would point into middle of the filler object.
  3961   // The current approach is to not coalesce and leave the BOT contents intact.
  3962   // </original comment>
  3963   //
  3964   // We now reset the BOT when we start the object iteration over the
  3965   // region and refine its entries for every object we come across. So
  3966   // the above comment is not really relevant and we should be able
  3967   // to coalesce dead objects if we want to.
  3968   void do_object(oop obj) {
  3969     HeapWord* obj_addr = (HeapWord*) obj;
  3970     assert(_hr->is_in(obj_addr), "sanity");
  3971     size_t obj_size = obj->size();
  3972     _hr->update_bot_for_object(obj_addr, obj_size);
  3973     if (obj->is_forwarded() && obj->forwardee() == obj) {
  3974       // The object failed to move.
  3975       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
  3976       _cm->markPrev(obj);
  3977       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3978       _prev_marked_bytes += (obj_size * HeapWordSize);
  3979       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
  3980         _cm->markAndGrayObjectIfNecessary(obj);
  3982       obj->set_mark(markOopDesc::prototype());
  3983       // While we were processing RSet buffers during the
  3984       // collection, we actually didn't scan any cards on the
  3985       // collection set, since we didn't want to update remembered
  3986       // sets with entries that point into the collection set, given
  3987       // that live objects from the collection set are about to move
  3988       // and such entries will be stale very soon. This change also
  3989       // dealt with a reliability issue which involved scanning a
  3990       // card in the collection set and coming across an array that
  3991       // was being chunked and looking malformed. The problem is
  3992       // that, if evacuation fails, we might have remembered set
  3993       // entries missing given that we skipped cards on the
  3994       // collection set. So, we'll recreate such entries now.
  3995       obj->oop_iterate(_cl);
  3996       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3997     } else {
  3998       // The object has been either evacuated or is dead. Fill it with a
  3999       // dummy object.
  4000       MemRegion mr((HeapWord*)obj, obj_size);
  4001       CollectedHeap::fill_with_object(mr);
  4002       _cm->clearRangeBothMaps(mr);
  4005 };
  4007 void G1CollectedHeap::remove_self_forwarding_pointers() {
  4008   UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
  4009   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  4010   UpdateRSetDeferred deferred_update(_g1h, &dcq);
  4011   OopsInHeapRegionClosure *cl;
  4012   if (G1DeferredRSUpdate) {
  4013     cl = &deferred_update;
  4014   } else {
  4015     cl = &immediate_update;
  4017   HeapRegion* cur = g1_policy()->collection_set();
  4018   while (cur != NULL) {
  4019     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  4020     assert(!cur->isHumongous(), "sanity");
  4022     if (cur->evacuation_failed()) {
  4023       assert(cur->in_collection_set(), "bad CS");
  4024       RemoveSelfPointerClosure rspc(_g1h, cur, cl);
  4026       cur->reset_bot();
  4027       cl->set_region(cur);
  4028       cur->object_iterate(&rspc);
  4030       // A number of manipulations to make the TAMS be the current top,
  4031       // and the marked bytes be the ones observed in the iteration.
  4032       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
  4033         // The comments below are the postconditions achieved by the
  4034         // calls.  Note especially the last such condition, which says that
  4035         // the count of marked bytes has been properly restored.
  4036         cur->note_start_of_marking(false);
  4037         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  4038         cur->add_to_marked_bytes(rspc.prev_marked_bytes());
  4039         // _next_marked_bytes == prev_marked_bytes.
  4040         cur->note_end_of_marking();
  4041         // _prev_top_at_mark_start == top(),
  4042         // _prev_marked_bytes == prev_marked_bytes
  4044       // If there is no mark in progress, we modified the _next variables
  4045       // above needlessly, but harmlessly.
  4046       if (_g1h->mark_in_progress()) {
  4047         cur->note_start_of_marking(false);
  4048         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  4049         // _next_marked_bytes == next_marked_bytes.
  4052       // Now make sure the region has the right index in the sorted array.
  4053       g1_policy()->note_change_in_marked_bytes(cur);
  4055     cur = cur->next_in_collection_set();
  4057   assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  4059   // Now restore saved marks, if any.
  4060   if (_objs_with_preserved_marks != NULL) {
  4061     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  4062     guarantee(_objs_with_preserved_marks->length() ==
  4063               _preserved_marks_of_objs->length(), "Both or none.");
  4064     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  4065       oop obj   = _objs_with_preserved_marks->at(i);
  4066       markOop m = _preserved_marks_of_objs->at(i);
  4067       obj->set_mark(m);
  4069     // Delete the preserved marks growable arrays (allocated on the C heap).
  4070     delete _objs_with_preserved_marks;
  4071     delete _preserved_marks_of_objs;
  4072     _objs_with_preserved_marks = NULL;
  4073     _preserved_marks_of_objs = NULL;
  4077 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  4078   _evac_failure_scan_stack->push(obj);
  4081 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  4082   assert(_evac_failure_scan_stack != NULL, "precondition");
  4084   while (_evac_failure_scan_stack->length() > 0) {
  4085      oop obj = _evac_failure_scan_stack->pop();
  4086      _evac_failure_closure->set_region(heap_region_containing(obj));
  4087      obj->oop_iterate_backwards(_evac_failure_closure);
  4091 oop
  4092 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  4093                                                oop old) {
  4094   markOop m = old->mark();
  4095   oop forward_ptr = old->forward_to_atomic(old);
  4096   if (forward_ptr == NULL) {
  4097     // Forward-to-self succeeded.
  4098     if (_evac_failure_closure != cl) {
  4099       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  4100       assert(!_drain_in_progress,
  4101              "Should only be true while someone holds the lock.");
  4102       // Set the global evac-failure closure to the current thread's.
  4103       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  4104       set_evac_failure_closure(cl);
  4105       // Now do the common part.
  4106       handle_evacuation_failure_common(old, m);
  4107       // Reset to NULL.
  4108       set_evac_failure_closure(NULL);
  4109     } else {
  4110       // The lock is already held, and this is recursive.
  4111       assert(_drain_in_progress, "This should only be the recursive case.");
  4112       handle_evacuation_failure_common(old, m);
  4114     return old;
  4115   } else {
  4116     // Someone else had a place to copy it.
  4117     return forward_ptr;
  4121 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  4122   set_evacuation_failed(true);
  4124   preserve_mark_if_necessary(old, m);
  4126   HeapRegion* r = heap_region_containing(old);
  4127   if (!r->evacuation_failed()) {
  4128     r->set_evacuation_failed(true);
  4129     if (G1PrintHeapRegions) {
  4130       gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
  4131                           "["PTR_FORMAT","PTR_FORMAT")\n",
  4132                           r, r->bottom(), r->end());
  4136   push_on_evac_failure_scan_stack(old);
  4138   if (!_drain_in_progress) {
  4139     // prevent recursion in copy_to_survivor_space()
  4140     _drain_in_progress = true;
  4141     drain_evac_failure_scan_stack();
  4142     _drain_in_progress = false;
  4146 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  4147   assert(evacuation_failed(), "Oversaving!");
  4148   // We want to call the "for_promotion_failure" version only in the
  4149   // case of a promotion failure.
  4150   if (m->must_be_preserved_for_promotion_failure(obj)) {
  4151     if (_objs_with_preserved_marks == NULL) {
  4152       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  4153       _objs_with_preserved_marks =
  4154         new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  4155       _preserved_marks_of_objs =
  4156         new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
  4158     _objs_with_preserved_marks->push(obj);
  4159     _preserved_marks_of_objs->push(m);
  4163 // *** Parallel G1 Evacuation
  4165 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4166                                                   size_t word_size) {
  4167   assert(!isHumongous(word_size),
  4168          err_msg("we should not be seeing humongous allocation requests "
  4169                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4171   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  4172   // let the caller handle alloc failure
  4173   if (alloc_region == NULL) return NULL;
  4175   HeapWord* block = alloc_region->par_allocate(word_size);
  4176   if (block == NULL) {
  4177     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  4179   return block;
  4182 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  4183                                             bool par) {
  4184   // Another thread might have obtained alloc_region for the given
  4185   // purpose, and might be attempting to allocate in it, and might
  4186   // succeed.  Therefore, we can't do the "finalization" stuff on the
  4187   // region below until we're sure the last allocation has happened.
  4188   // We ensure this by filling the remaining space with a garbage
  4189   // object.
  4190   if (par) par_allocate_remaining_space(alloc_region);
  4191   // Now we can do the post-GC stuff on the region.
  4192   alloc_region->note_end_of_copying();
  4193   g1_policy()->record_after_bytes(alloc_region->used());
  4196 HeapWord*
  4197 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  4198                                          HeapRegion*    alloc_region,
  4199                                          bool           par,
  4200                                          size_t         word_size) {
  4201   assert(!isHumongous(word_size),
  4202          err_msg("we should not be seeing humongous allocation requests "
  4203                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4205   // We need to make sure we serialize calls to this method. Given
  4206   // that the FreeList_lock guards accesses to the free_list anyway,
  4207   // and we need to potentially remove a region from it, we'll use it
  4208   // to protect the whole call.
  4209   MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  4211   HeapWord* block = NULL;
  4212   // In the parallel case, a thread that previously obtained the lock may
  4213   // have already assigned a new gc_alloc_region.
  4214   if (alloc_region != _gc_alloc_regions[purpose]) {
  4215     assert(par, "But should only happen in parallel case.");
  4216     alloc_region = _gc_alloc_regions[purpose];
  4217     if (alloc_region == NULL) return NULL;
  4218     block = alloc_region->par_allocate(word_size);
  4219     if (block != NULL) return block;
  4220     // Otherwise, continue; this new region is also full.
  4222   assert(alloc_region != NULL, "We better have an allocation region");
  4223   retire_alloc_region(alloc_region, par);
  4225   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  4226     // Cannot allocate more regions for the given purpose.
  4227     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
  4228     // Is there an alternative?
  4229     if (purpose != alt_purpose) {
  4230       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
  4232       // Has the alternative region not already been aliased?
  4232       if (alloc_region != alt_region && alt_region != NULL) {
  4233         // Try to allocate in the alternative region.
  4234         if (par) {
  4235           block = alt_region->par_allocate(word_size);
  4236         } else {
  4237           block = alt_region->allocate(word_size);
  4239         // Make an alias.
  4240         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  4241         if (block != NULL) {
  4242           return block;
  4244         retire_alloc_region(alt_region, par);
  4246       // Both the allocation region and the alternative one are full
  4247       // and aliased; replace them with a new allocation region.
  4248       purpose = alt_purpose;
  4249     } else {
  4250       set_gc_alloc_region(purpose, NULL);
  4251       return NULL;
  4255   // Now allocate a new region for allocation.
  4256   alloc_region = new_gc_alloc_region(purpose, word_size);
  4258   // let the caller handle alloc failure
  4259   if (alloc_region != NULL) {
  4261     assert(check_gc_alloc_regions(), "alloc regions messed up");
  4262     assert(alloc_region->saved_mark_at_top(),
  4263            "Mark should have been saved already.");
  4264     // This must be done last: once it's installed, other threads may
  4265     // allocate in it (without holding the lock).
  4266     set_gc_alloc_region(purpose, alloc_region);
  4268     if (par) {
  4269       block = alloc_region->par_allocate(word_size);
  4270     } else {
  4271       block = alloc_region->allocate(word_size);
  4273     // Caller handles alloc failure.
  4274   } else {
  4275     // This also NULLs out any other purpose entries aliased to the same old alloc region.
  4276     set_gc_alloc_region(purpose, NULL);
  4278   return block;  // May be NULL.
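
// [Editorial illustration, not part of the original file] The slow path
// above has a common "re-check under the lock" shape: take the lock, see
// whether another thread already installed a fresh allocation region (and
// simply retry in it), and only otherwise retire the old region and install
// a new one. A stripped-down sketch, where Region and the region_* helpers
// are placeholders for the real region machinery:
//
//   #include <cstddef>
//   #include <mutex>
//
//   struct Region;                                       // opaque placeholder
//   void*   region_allocate(Region* r, size_t words);    // assumed helpers
//   Region* region_new();
//   void    region_retire(Region* r);
//
//   std::mutex free_list_lock;
//   Region*    current_region = nullptr;          // shared allocation region
//
//   void* allocate_slow(Region* seen, size_t words) {
//     std::lock_guard<std::mutex> guard(free_list_lock);
//     if (seen != current_region) {             // another thread refreshed it
//       if (current_region == nullptr) return nullptr;
//       if (void* p = region_allocate(current_region, words)) return p;
//       seen = current_region;                  // the fresh region is full too
//     }
//     region_retire(seen);
//     current_region = region_new();            // may be nullptr
//     return current_region != nullptr
//         ? region_allocate(current_region, words) : nullptr;
//   }
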
  4281 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
  4282   HeapWord* block = NULL;
  4283   size_t free_words;
  4284   do {
  4285     free_words = r->free()/HeapWordSize;
  4286     // If there's too little space, no one can allocate, so we're done.
  4287     if (free_words < CollectedHeap::min_fill_size()) return;
  4288     // Otherwise, try to claim it.
  4289     block = r->par_allocate(free_words);
  4290   } while (block == NULL);
  4291   fill_with_object(block, free_words);
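
// [Editorial illustration, not part of the original file] The loop above
// repeatedly tries to claim whatever space is left in the region with a
// parallel (CAS-based) allocation; whichever thread wins plugs the tail with
// a dummy object so nobody else can allocate there. The same retry shape
// with a plain atomic "top" pointer (min_fill stands in for
// CollectedHeap::min_fill_size()):
//
//   #include <atomic>
//   #include <cstddef>
//
//   char* par_claim_tail(std::atomic<char*>& top, char* end, size_t min_fill) {
//     for (;;) {
//       char* cur = top.load();
//       size_t free_bytes = static_cast<size_t>(end - cur);
//       if (free_bytes < min_fill) return nullptr;   // too small to matter
//       // Try to claim the whole tail in one CAS; retry if we lose the race.
//       if (top.compare_exchange_weak(cur, end)) {
//         return cur;                                // caller fills [cur, end)
//       }
//     }
//   }
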
  4294 #ifndef PRODUCT
  4295 bool GCLabBitMapClosure::do_bit(size_t offset) {
  4296   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
  4297   guarantee(_cm->isMarked(oop(addr)), "it should be!");
  4298   return true;
  4300 #endif // PRODUCT
  4302 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
  4303   : _g1h(g1h),
  4304     _refs(g1h->task_queue(queue_num)),
  4305     _dcq(&g1h->dirty_card_queue_set()),
  4306     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  4307     _g1_rem(g1h->g1_rem_set()),
  4308     _hash_seed(17), _queue_num(queue_num),
  4309     _term_attempts(0),
  4310     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  4311     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
  4312     _age_table(false),
  4313     _strong_roots_time(0), _term_time(0),
  4314     _alloc_buffer_waste(0), _undo_waste(0)
  4316   // We allocate one entry per young CSet region plus one, since we
  4317   // "sacrifice" entry 0 to keep track of surviving bytes for
  4318   // non-young regions (where the age is -1).
  4319   // We also add a few padding elements at the beginning and at the end
  4320   // in an attempt to eliminate cache contention.
  4321   size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
  4322   size_t array_length = PADDING_ELEM_NUM +
  4323                         real_length +
  4324                         PADDING_ELEM_NUM;
  4325   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  4326   if (_surviving_young_words_base == NULL)
  4327     vm_exit_out_of_memory(array_length * sizeof(size_t),
  4328                           "Not enough space for young surv histo.");
  4329   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  4330   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  4332   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  4333   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  4335   _start = os::elapsedTime();
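
// [Editorial illustration, not part of the original file] The
// surviving-young-words array set up above reserves slot 0 for non-young
// regions (age index -1 maps to slot 0 after the caller's +1 shift) and pads
// both ends so per-thread arrays do not share cache lines. The layout in
// isolation, with a hypothetical PAD constant standing in for
// PADDING_ELEM_NUM:
//
//   #include <cstddef>
//   #include <cstring>
//
//   const size_t PAD = 64 / sizeof(size_t);     // roughly one cache line
//
//   size_t* make_surv_words(size_t young_cset_length) {
//     size_t real_len  = young_cset_length + 1; // +1: slot 0 is "non-young"
//     size_t total_len = PAD + real_len + PAD;  // padding on both sides
//     size_t* base     = new size_t[total_len];
//     size_t* words    = base + PAD;            // hand out the padded middle
//     memset(words, 0, real_len * sizeof(size_t));
//     return words;                             // index with age_index + 1
//   }
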
  4338 void
  4339 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  4341   st->print_raw_cr("GC Termination Stats");
  4342   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  4343                    " ------waste (KiB)------");
  4344   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  4345                    "  total   alloc    undo");
  4346   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  4347                    " ------- ------- -------");
  4350 void
  4351 G1ParScanThreadState::print_termination_stats(int i,
  4352                                               outputStream* const st) const
  4354   const double elapsed_ms = elapsed_time() * 1000.0;
  4355   const double s_roots_ms = strong_roots_time() * 1000.0;
  4356   const double term_ms    = term_time() * 1000.0;
  4357   st->print_cr("%3d %9.2f %9.2f %6.2f "
  4358                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
  4359                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
  4360                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
  4361                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
  4362                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
  4363                alloc_buffer_waste() * HeapWordSize / K,
  4364                undo_waste() * HeapWordSize / K);
  4367 #ifdef ASSERT
  4368 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  4369   assert(ref != NULL, "invariant");
  4370   assert(UseCompressedOops, "sanity");
  4371   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  4372   oop p = oopDesc::load_decode_heap_oop(ref);
  4373   assert(_g1h->is_in_g1_reserved(p),
  4374          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4375   return true;
  4378 bool G1ParScanThreadState::verify_ref(oop* ref) const {
  4379   assert(ref != NULL, "invariant");
  4380   if (has_partial_array_mask(ref)) {
  4381     // Must be in the collection set--it's already been copied.
  4382     oop p = clear_partial_array_mask(ref);
  4383     assert(_g1h->obj_in_cs(p),
  4384            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4385   } else {
  4386     oop p = oopDesc::load_decode_heap_oop(ref);
  4387     assert(_g1h->is_in_g1_reserved(p),
  4388            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4390   return true;
  4393 bool G1ParScanThreadState::verify_task(StarTask ref) const {
  4394   if (ref.is_narrow()) {
  4395     return verify_ref((narrowOop*) ref);
  4396   } else {
  4397     return verify_ref((oop*) ref);
  4400 #endif // ASSERT
  4402 void G1ParScanThreadState::trim_queue() {
  4403   StarTask ref;
  4404   do {
  4405     // Drain the overflow stack first, so other threads can steal.
  4406     while (refs()->pop_overflow(ref)) {
  4407       deal_with_reference(ref);
  4409     while (refs()->pop_local(ref)) {
  4410       deal_with_reference(ref);
  4412   } while (!refs()->is_empty());
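
// [Editorial illustration, not part of the original file] trim_queue() above
// drains the overflow (shared) side of the task queue before the local side,
// so other workers still have something to steal, and loops until both are
// empty because handling a task may enqueue more. The same drain loop over a
// hypothetical task-queue interface:
//
//   template <typename Queue, typename Handler>
//   void trim(Queue& q, Handler handle) {
//     typename Queue::Task t;
//     do {
//       while (q.pop_overflow(t)) handle(t);   // stealable side first
//       while (q.pop_local(t))    handle(t);   // then the private side
//     } while (!q.empty());                    // handlers may push more work
//   }
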
  4415 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  4416   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  4417   _par_scan_state(par_scan_state) { }
  4419 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
  4420   // This is called _after_ do_oop_work has been called, hence after
  4421   // the object has been relocated to its new location and *p points
  4422   // to its new location.
  4424   T heap_oop = oopDesc::load_heap_oop(p);
  4425   if (!oopDesc::is_null(heap_oop)) {
  4426     oop obj = oopDesc::decode_heap_oop(heap_oop);
  4427     assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
  4428            "shouldn't still be in the CSet if evacuation didn't fail.");
  4429     HeapWord* addr = (HeapWord*)obj;
  4430     if (_g1->is_in_g1_reserved(addr))
  4431       _cm->grayRoot(oop(addr));
  4435 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
  4436   size_t    word_sz = old->size();
  4437   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  4438   // +1 to make the -1 indexes valid...
  4439   int       young_index = from_region->young_index_in_cset()+1;
  4440   assert( (from_region->is_young() && young_index > 0) ||
  4441           (!from_region->is_young() && young_index == 0), "invariant" );
  4442   G1CollectorPolicy* g1p = _g1->g1_policy();
  4443   markOop m = old->mark();
  4444   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4445                                            : m->age();
  4446   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4447                                                              word_sz);
  4448   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  4449   oop       obj     = oop(obj_ptr);
  4451   if (obj_ptr == NULL) {
  4452     // This will either forward-to-self, or detect that someone else has
  4453     // installed a forwarding pointer.
  4454     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4455     return _g1->handle_evacuation_failure_par(cl, old);
  4458   // We're going to allocate linearly, so might as well prefetch ahead.
  4459   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  4461   oop forward_ptr = old->forward_to_atomic(obj);
  4462   if (forward_ptr == NULL) {
  4463     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4464     if (g1p->track_object_age(alloc_purpose)) {
  4465       // We could simply do obj->incr_age(). However, this causes a
  4466       // performance issue. obj->incr_age() will first check whether
  4467       // the object has a displaced mark by checking its mark word;
  4468       // getting the mark word from the new location of the object
  4469       // stalls. So, given that we already have the mark word and we
  4470       // are about to install it anyway, it's better to increase the
  4471       // age on the mark word, when the object does not have a
  4472       // displaced mark word. We're not expecting many objects to have
  4473       // a displaced mark word, so that case is not optimized
  4474       // further (it could be...) and we simply call obj->incr_age().
  4476       if (m->has_displaced_mark_helper()) {
  4477         // in this case, we have to install the mark word first,
  4478         // otherwise obj looks to be forwarded (the old mark word,
  4479         // which contains the forward pointer, was copied)
  4480         obj->set_mark(m);
  4481         obj->incr_age();
  4482       } else {
  4483         m = m->incr_age();
  4484         obj->set_mark(m);
  4486       _par_scan_state->age_table()->add(obj, word_sz);
  4487     } else {
  4488       obj->set_mark(m);
  4491     // preserve "next" mark bit
  4492     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  4493       if (!use_local_bitmaps ||
  4494           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
  4495         // if we couldn't mark it on the local bitmap (this happens when
  4496         // the object was not allocated in the GCLab), we have to bite
  4497         // the bullet and do the standard parallel mark
  4498         _cm->markAndGrayObjectIfNecessary(obj);
  4500 #if 1
  4501       if (_g1->isMarkedNext(old)) {
  4502         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
  4504 #endif
  4507     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  4508     surv_young_words[young_index] += word_sz;
  4510     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4511       arrayOop(old)->set_length(0);
  4512       oop* old_p = set_partial_array_mask(old);
  4513       _par_scan_state->push_on_queue(old_p);
  4514     } else {
  4515       // No point in using the slower heap_region_containing() method,
  4516       // given that we know obj is in the heap.
  4517       _scanner->set_region(_g1->heap_region_containing_raw(obj));
  4518       obj->oop_iterate_backwards(_scanner);
  4520   } else {
  4521     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4522     obj = forward_ptr;
  4524   return obj;
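
// [Editorial illustration, not part of the original file] The age-increment
// path above avoids reading the just-copied object's header (which would
// stall on the fresh copy) by bumping the age in the mark word we already
// hold, and only goes through obj->incr_age() in the rare displaced-mark
// case. Roughly, with Obj and Mark as placeholder types for oop and markOop:
//
//   void install_mark_with_age(Obj* obj, Mark m) {
//     if (m.has_displaced_mark()) {
//       obj->set_mark(m);              // install first, or obj looks forwarded
//       obj->incr_age();               // rare case: go through the object
//     } else {
//       obj->set_mark(m.incr_age());   // common case: bump the age in-hand
//     }
//   }
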
  4527 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
  4528 template <class T>
  4529 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
  4530 ::do_oop_work(T* p) {
  4531   oop obj = oopDesc::load_decode_heap_oop(p);
  4532   assert(barrier != G1BarrierRS || obj != NULL,
  4533          "Precondition: G1BarrierRS implies obj is nonNull");
  4535   // here the null check is implicit in the cset_fast_test() test
  4536   if (_g1->in_cset_fast_test(obj)) {
  4537 #if G1_REM_SET_LOGGING
  4538     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
  4539                            "into CS.", p, (void*) obj);
  4540 #endif
  4541     if (obj->is_forwarded()) {
  4542       oopDesc::encode_store_heap_oop(p, obj->forwardee());
  4543     } else {
  4544       oop copy_oop = copy_to_survivor_space(obj);
  4545       oopDesc::encode_store_heap_oop(p, copy_oop);
  4547     // When scanning the RS, we only care about objs in CS.
  4548     if (barrier == G1BarrierRS) {
  4549       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4553   if (barrier == G1BarrierEvac && obj != NULL) {
  4554     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4557   if (do_gen_barrier && obj != NULL) {
  4558     par_do_barrier(p);
  4562 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
  4563 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
  4565 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  4566   assert(has_partial_array_mask(p), "invariant");
  4567   oop old = clear_partial_array_mask(p);
  4568   assert(old->is_objArray(), "must be obj array");
  4569   assert(old->is_forwarded(), "must be forwarded");
  4570   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  4572   objArrayOop obj = objArrayOop(old->forwardee());
  4573   assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  4574   // Process ParGCArrayScanChunk elements now
  4575   // and push the remainder back onto queue
  4576   int start     = arrayOop(old)->length();
  4577   int end       = obj->length();
  4578   int remainder = end - start;
  4579   assert(start <= end, "just checking");
  4580   if (remainder > 2 * ParGCArrayScanChunk) {
  4581     // Test above combines last partial chunk with a full chunk
  4582     end = start + ParGCArrayScanChunk;
  4583     arrayOop(old)->set_length(end);
  4584     // Push remainder.
  4585     oop* old_p = set_partial_array_mask(old);
  4586     assert(arrayOop(old)->length() < obj->length(), "Empty push?");
  4587     _par_scan_state->push_on_queue(old_p);
  4588   } else {
  4589     // Restore length so that the heap remains parsable in
  4590     // case of evacuation failure.
  4591     arrayOop(old)->set_length(end);
  4593   _scanner.set_region(_g1->heap_region_containing_raw(obj));
  4594   // process our set of indices (include header in first chunk)
  4595   obj->oop_iterate_range(&_scanner, start, end);
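
// [Editorial illustration, not part of the original file] The closure above
// scans large object arrays in chunks: the length field of the old
// (forwarded-from) copy records how far scanning has progressed, one
// chunk-sized slice is processed per task, and the remainder is re-pushed.
// The chunking arithmetic in isolation (CHUNK stands in for
// ParGCArrayScanChunk):
//
//   const int CHUNK = 512;
//
//   // Computes the [start, end) slice to scan now; 'progress' plays the
//   // role of the old copy's length field.
//   void next_chunk(int& progress, int full_length,
//                   int& start, int& end, bool& push_remainder) {
//     start = progress;
//     end   = full_length;
//     if (end - start > 2 * CHUNK) {   // don't leave a tiny last chunk
//       end = start + CHUNK;
//       progress = end;
//       push_remainder = true;         // caller re-enqueues the rest
//     } else {
//       progress = end;                // done: length is fully restored
//       push_remainder = false;
//     }
//   }
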
  4598 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4599 protected:
  4600   G1CollectedHeap*              _g1h;
  4601   G1ParScanThreadState*         _par_scan_state;
  4602   RefToScanQueueSet*            _queues;
  4603   ParallelTaskTerminator*       _terminator;
  4605   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4606   RefToScanQueueSet*      queues()         { return _queues; }
  4607   ParallelTaskTerminator* terminator()     { return _terminator; }
  4609 public:
  4610   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4611                                 G1ParScanThreadState* par_scan_state,
  4612                                 RefToScanQueueSet* queues,
  4613                                 ParallelTaskTerminator* terminator)
  4614     : _g1h(g1h), _par_scan_state(par_scan_state),
  4615       _queues(queues), _terminator(terminator) {}
  4617   void do_void();
  4619 private:
  4620   inline bool offer_termination();
  4621 };
  4623 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4624   G1ParScanThreadState* const pss = par_scan_state();
  4625   pss->start_term_time();
  4626   const bool res = terminator()->offer_termination();
  4627   pss->end_term_time();
  4628   return res;
  4631 void G1ParEvacuateFollowersClosure::do_void() {
  4632   StarTask stolen_task;
  4633   G1ParScanThreadState* const pss = par_scan_state();
  4634   pss->trim_queue();
  4636   do {
  4637     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  4638       assert(pss->verify_task(stolen_task), "sanity");
  4639       if (stolen_task.is_narrow()) {
  4640         pss->deal_with_reference((narrowOop*) stolen_task);
  4641       } else {
  4642         pss->deal_with_reference((oop*) stolen_task);
  4645       // We've just processed a reference and we might have made
  4646       // available new entries on the queues. So we have to make sure
  4647       // we drain the queues as necessary.
  4648       pss->trim_queue();
  4650   } while (!offer_termination());
  4652   pss->retire_alloc_buffers();
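
// [Editorial illustration, not part of the original file] The follower loop
// above alternates between stealing from other workers' queues and draining
// the local queue, and only enters the termination protocol once nothing is
// left to steal. The same shape over hypothetical queue-set, worker and
// terminator types:
//
//   template <typename QueueSet, typename Worker, typename Terminator>
//   void evacuate_followers(QueueSet& queues, Worker& w, Terminator& term) {
//     w.trim_queue();                       // drain what we already have
//     do {
//       typename Worker::Task t;
//       while (queues.steal(w.id(), t)) {   // take work from other threads
//         w.handle(t);
//         w.trim_queue();                   // handling may enqueue more work
//       }
//     } while (!term.offer_termination());  // all workers must agree to stop
//   }
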
  4655 class G1ParTask : public AbstractGangTask {
  4656 protected:
  4657   G1CollectedHeap*       _g1h;
  4658   RefToScanQueueSet      *_queues;
  4659   ParallelTaskTerminator _terminator;
  4660   int _n_workers;
  4662   Mutex _stats_lock;
  4663   Mutex* stats_lock() { return &_stats_lock; }
  4665   size_t getNCards() {
  4666     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4667       / G1BlockOffsetSharedArray::N_bytes;
  4670 public:
  4671   G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
  4672     : AbstractGangTask("G1 collection"),
  4673       _g1h(g1h),
  4674       _queues(task_queues),
  4675       _terminator(workers, _queues),
  4676       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
  4677       _n_workers(workers)
  4678   {}
  4680   RefToScanQueueSet* queues() { return _queues; }
  4682   RefToScanQueue *work_queue(int i) {
  4683     return queues()->queue(i);
  4686   void work(int i) {
  4687     if (i >= _n_workers) return;  // no work needed this round
  4689     double start_time_ms = os::elapsedTime() * 1000.0;
  4690     _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
  4692     ResourceMark rm;
  4693     HandleMark   hm;
  4695     G1ParScanThreadState            pss(_g1h, i);
  4696     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  4697     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  4698     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  4700     pss.set_evac_closure(&scan_evac_cl);
  4701     pss.set_evac_failure_closure(&evac_failure_cl);
  4702     pss.set_partial_scan_closure(&partial_scan_cl);
  4704     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  4705     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  4706     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  4707     G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  4709     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  4710     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  4711     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  4713     OopsInHeapRegionClosure        *scan_root_cl;
  4714     OopsInHeapRegionClosure        *scan_perm_cl;
  4716     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  4717       scan_root_cl = &scan_mark_root_cl;
  4718       scan_perm_cl = &scan_mark_perm_cl;
  4719     } else {
  4720       scan_root_cl = &only_scan_root_cl;
  4721       scan_perm_cl = &only_scan_perm_cl;
  4724     pss.start_strong_roots();
  4725     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  4726                                   SharedHeap::SO_AllClasses,
  4727                                   scan_root_cl,
  4728                                   &push_heap_rs_cl,
  4729                                   scan_perm_cl,
  4730                                   i);
  4731     pss.end_strong_roots();
  4733       double start = os::elapsedTime();
  4734       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4735       evac.do_void();
  4736       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4737       double term_ms = pss.term_time()*1000.0;
  4738       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  4739       _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
  4741     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4742     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  4744     // Clean up any par-expanded rem sets.
  4745     HeapRegionRemSet::par_cleanup();
  4747     if (ParallelGCVerbose) {
  4748       MutexLocker x(stats_lock());
  4749       pss.print_termination_stats(i);
  4752     assert(pss.refs()->is_empty(), "should be empty");
  4753     double end_time_ms = os::elapsedTime() * 1000.0;
  4754     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
  4756 };
  4758 // *** Common G1 Evacuation Stuff
  4760 // This method is run in a GC worker.
  4762 void
  4763 G1CollectedHeap::
  4764 g1_process_strong_roots(bool collecting_perm_gen,
  4765                         SharedHeap::ScanningOption so,
  4766                         OopClosure* scan_non_heap_roots,
  4767                         OopsInHeapRegionClosure* scan_rs,
  4768                         OopsInGenClosure* scan_perm,
  4769                         int worker_i) {
  4770   // First scan the strong roots, including the perm gen.
  4771   double ext_roots_start = os::elapsedTime();
  4772   double closure_app_time_sec = 0.0;
  4774   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  4775   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  4776   buf_scan_perm.set_generation(perm_gen());
  4778   // Walk the code cache w/o buffering, because StarTask cannot handle
  4779   // unaligned oop locations.
  4780   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
  4782   process_strong_roots(false, // no scoping; this is parallel code
  4783                        collecting_perm_gen, so,
  4784                        &buf_scan_non_heap_roots,
  4785                        &eager_scan_code_roots,
  4786                        &buf_scan_perm);
  4788   // Finish up any enqueued closure apps.
  4789   buf_scan_non_heap_roots.done();
  4790   buf_scan_perm.done();
  4791   double ext_roots_end = os::elapsedTime();
  4792   g1_policy()->reset_obj_copy_time(worker_i);
  4793   double obj_copy_time_sec =
  4794     buf_scan_non_heap_roots.closure_app_seconds() +
  4795     buf_scan_perm.closure_app_seconds();
  4796   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  4797   double ext_root_time_ms =
  4798     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  4799   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  4801   // Scan strong roots in mark stack.
  4802   if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
  4803     concurrent_mark()->oops_do(scan_non_heap_roots);
  4805   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  4806   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
  4808   // XXX What should this be doing in the parallel case?
  4809   g1_policy()->record_collection_pause_end_CH_strong_roots();
  4810   // Now scan the complement of the collection set.
  4811   if (scan_rs != NULL) {
  4812     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  4814   // Finish with the ref_processor roots.
  4815   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4816     // We need to treat the discovered reference lists as roots and
  4817     // keep entries (which are added by the marking threads) on them
  4818     // live until they can be processed at the end of marking.
  4819     ref_processor()->weak_oops_do(scan_non_heap_roots);
  4820     ref_processor()->oops_do(scan_non_heap_roots);
  4822   g1_policy()->record_collection_pause_end_G1_strong_roots();
  4823   _process_strong_tasks->all_tasks_completed();
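
// [Editorial illustration, not part of the original file] The
// is_task_claimed() calls above let exactly one of the parallel workers
// perform each shared, non-splittable piece of root processing. A minimal
// claim table with the same behaviour, using std::atomic:
//
//   #include <atomic>
//
//   struct TaskClaimsSketch {
//     std::atomic<bool> _claimed[16];
//     TaskClaimsSketch() { for (auto& f : _claimed) f.store(false); }
//
//     // Returns true if some other worker already claimed task t; the first
//     // caller sees false and does the work.
//     bool is_task_claimed(int t) { return _claimed[t].exchange(true); }
//
//     void all_tasks_completed() {
//       // Reset for reuse (the real SubTasksDone also waits for all workers).
//       for (auto& f : _claimed) f.store(false);
//     }
//   };
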
  4826 void
  4827 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  4828                                        OopClosure* non_root_closure) {
  4829   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  4830   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  4834 class SaveMarksClosure: public HeapRegionClosure {
  4835 public:
  4836   bool doHeapRegion(HeapRegion* r) {
  4837     r->save_marks();
  4838     return false;
  4840 };
  4842 void G1CollectedHeap::save_marks() {
  4843   if (!CollectedHeap::use_parallel_gc_threads()) {
  4844     SaveMarksClosure sm;
  4845     heap_region_iterate(&sm);
  4847   // We do this even in the parallel case
  4848   perm_gen()->save_marks();
  4851 void G1CollectedHeap::evacuate_collection_set() {
  4852   set_evacuation_failed(false);
  4854   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4855   concurrent_g1_refine()->set_use_cache(false);
  4856   concurrent_g1_refine()->clear_hot_cache_claimed_index();
  4858   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  4859   set_par_threads(n_workers);
  4860   G1ParTask g1_par_task(this, n_workers, _task_queues);
  4862   init_for_evac_failure(NULL);
  4864   rem_set()->prepare_for_younger_refs_iterate(true);
  4866   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4867   double start_par = os::elapsedTime();
  4868   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4869     // The individual threads will set their evac-failure closures.
  4870     StrongRootsScope srs(this);
  4871     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  4872     workers()->run_task(&g1_par_task);
  4873   } else {
  4874     StrongRootsScope srs(this);
  4875     g1_par_task.work(0);
  4878   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4879   g1_policy()->record_par_time(par_time);
  4880   set_par_threads(0);
  4881   // Is this the right thing to do here?  We don't save marks
  4882   // on individual heap regions when we allocate from
  4883   // them in parallel, so this seems like the correct place for this.
  4884   retire_all_alloc_regions();
  4886   // Weak root processing.
  4887   // Note: when JSR 292 is enabled and code blobs can contain
  4888   // non-perm oops then we will need to process the code blobs
  4889   // here too.
  4891     G1IsAliveClosure is_alive(this);
  4892     G1KeepAliveClosure keep_alive(this);
  4893     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4895   release_gc_alloc_regions(false /* totally */);
  4896   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4898   concurrent_g1_refine()->clear_hot_cache();
  4899   concurrent_g1_refine()->set_use_cache(true);
  4901   finalize_for_evac_failure();
  4903   // Must do this before removing self-forwarding pointers, which clears
  4904   // the per-region evac-failure flags.
  4905   concurrent_mark()->complete_marking_in_collection_set();
  4907   if (evacuation_failed()) {
  4908     remove_self_forwarding_pointers();
  4909     if (PrintGCDetails) {
  4910       gclog_or_tty->print(" (to-space overflow)");
  4911     } else if (PrintGC) {
  4912       gclog_or_tty->print("--");
  4916   if (G1DeferredRSUpdate) {
  4917     RedirtyLoggedCardTableEntryFastClosure redirty;
  4918     dirty_card_queue_set().set_closure(&redirty);
  4919     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4921     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  4922     dcq.merge_bufferlists(&dirty_card_queue_set());
  4923     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4925   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  4928 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
  4929                                      size_t* pre_used,
  4930                                      FreeRegionList* free_list,
  4931                                      HumongousRegionSet* humongous_proxy_set,
  4932                                      HRRSCleanupTask* hrrs_cleanup_task,
  4933                                      bool par) {
  4934   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  4935     if (hr->isHumongous()) {
  4936       assert(hr->startsHumongous(), "we should only see starts humongous");
  4937       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
  4938     } else {
  4939       free_region(hr, pre_used, free_list, par);
  4941   } else {
  4942     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
  4946 void G1CollectedHeap::free_region(HeapRegion* hr,
  4947                                   size_t* pre_used,
  4948                                   FreeRegionList* free_list,
  4949                                   bool par) {
  4950   assert(!hr->isHumongous(), "this is only for non-humongous regions");
  4951   assert(!hr->is_empty(), "the region should not be empty");
  4952   assert(free_list != NULL, "pre-condition");
  4954   *pre_used += hr->used();
  4955   hr->hr_clear(par, true /* clear_space */);
  4956   free_list->add_as_tail(hr);
  4959 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
  4960                                      size_t* pre_used,
  4961                                      FreeRegionList* free_list,
  4962                                      HumongousRegionSet* humongous_proxy_set,
  4963                                      bool par) {
  4964   assert(hr->startsHumongous(), "this is only for starts humongous regions");
  4965   assert(free_list != NULL, "pre-condition");
  4966   assert(humongous_proxy_set != NULL, "pre-condition");
  4968   size_t hr_used = hr->used();
  4969   size_t hr_capacity = hr->capacity();
  4970   size_t hr_pre_used = 0;
  4971   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  4972   hr->set_notHumongous();
  4973   free_region(hr, &hr_pre_used, free_list, par);
  4975   int i = hr->hrs_index() + 1;
  4976   size_t num = 1;
  4977   while ((size_t) i < n_regions()) {
  4978     HeapRegion* curr_hr = _hrs->at(i);
  4979     if (!curr_hr->continuesHumongous()) {
  4980       break;
  4982     curr_hr->set_notHumongous();
  4983     free_region(curr_hr, &hr_pre_used, free_list, par);
  4984     num += 1;
  4985     i += 1;
  4987   assert(hr_pre_used == hr_used,
  4988          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
  4989                  "should be the same", hr_pre_used, hr_used));
  4990   *pre_used += hr_pre_used;
  4993 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
  4994                                        FreeRegionList* free_list,
  4995                                        HumongousRegionSet* humongous_proxy_set,
  4996                                        bool par) {
  4997   if (pre_used > 0) {
  4998     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
  4999     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  5000     assert(_summary_bytes_used >= pre_used,
  5001            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
  5002                    "should be >= pre_used: "SIZE_FORMAT,
  5003                    _summary_bytes_used, pre_used));
  5004     _summary_bytes_used -= pre_used;
  5006   if (free_list != NULL && !free_list->is_empty()) {
  5007     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
  5008     _free_list.add_as_tail(free_list);
  5010   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
  5011     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
  5012     _humongous_set.update_from_proxy(humongous_proxy_set);
  5016 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  5017   while (list != NULL) {
  5018     guarantee( list->is_young(), "invariant" );
  5020     HeapWord* bottom = list->bottom();
  5021     HeapWord* end = list->end();
  5022     MemRegion mr(bottom, end);
  5023     ct_bs->dirty(mr);
  5025     list = list->get_next_young_region();
  5030 class G1ParCleanupCTTask : public AbstractGangTask {
  5031   CardTableModRefBS* _ct_bs;
  5032   G1CollectedHeap* _g1h;
  5033   HeapRegion* volatile _su_head;
  5034 public:
  5035   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  5036                      G1CollectedHeap* g1h,
  5037                      HeapRegion* survivor_list) :
  5038     AbstractGangTask("G1 Par Cleanup CT Task"),
  5039     _ct_bs(ct_bs),
  5040     _g1h(g1h),
  5041     _su_head(survivor_list)
  5042   { }
  5044   void work(int i) {
  5045     HeapRegion* r;
  5046     while (r = _g1h->pop_dirty_cards_region()) {
  5047       clear_cards(r);
  5049     // Redirty the cards of the survivor regions.
  5050     dirty_list(&this->_su_head);
  5053   void clear_cards(HeapRegion* r) {
  5054     // Cards for Survivor regions will be dirtied later.
  5055     if (!r->is_survivor()) {
  5056       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  5060   void dirty_list(HeapRegion* volatile * head_ptr) {
  5061     HeapRegion* head;
  5062     do {
  5063       // Pop region off the list.
  5064       head = *head_ptr;
  5065       if (head != NULL) {
  5066         HeapRegion* r = (HeapRegion*)
  5067           Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
  5068         if (r == head) {
  5069           assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
  5070           _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
  5073     } while (*head_ptr != NULL);
  5075 };
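
// [Editorial illustration, not part of the original file] dirty_list() above
// pops regions off a shared singly-linked list with a compare-and-swap on
// the head pointer, so several cleanup workers can consume the survivor list
// concurrently. The same pop loop with std::atomic (ignoring ABA, which the
// single-pass usage above does not need to worry about):
//
//   #include <atomic>
//
//   struct Node { Node* next; };
//
//   template <typename Visit>
//   void drain_list(std::atomic<Node*>& head, Visit visit) {
//     Node* h;
//     while ((h = head.load()) != nullptr) {
//       // Only the thread whose CAS succeeds gets to process 'h'.
//       if (head.compare_exchange_weak(h, h->next)) {
//         visit(h);
//       }
//     }
//   }
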
  5078 #ifndef PRODUCT
  5079 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  5080   CardTableModRefBS* _ct_bs;
  5081 public:
  5082   G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
  5083     : _ct_bs(ct_bs)
  5084   { }
  5085   virtual bool doHeapRegion(HeapRegion* r)
  5087     MemRegion mr(r->bottom(), r->end());
  5088     if (r->is_survivor()) {
  5089       _ct_bs->verify_dirty_region(mr);
  5090     } else {
  5091       _ct_bs->verify_clean_region(mr);
  5093     return false;
  5095 };
  5096 #endif
  5098 void G1CollectedHeap::cleanUpCardTable() {
  5099   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  5100   double start = os::elapsedTime();
  5102   // Iterate over the dirty cards region list.
  5103   G1ParCleanupCTTask cleanup_task(ct_bs, this,
  5104                                   _young_list->first_survivor_region());
  5106   if (ParallelGCThreads > 0) {
  5107     set_par_threads(workers()->total_workers());
  5108     workers()->run_task(&cleanup_task);
  5109     set_par_threads(0);
  5110   } else {
  5111     while (_dirty_cards_region_list) {
  5112       HeapRegion* r = _dirty_cards_region_list;
  5113       cleanup_task.clear_cards(r);
  5114       _dirty_cards_region_list = r->get_next_dirty_cards_region();
  5115       if (_dirty_cards_region_list == r) {
  5116         // The last region.
  5117         _dirty_cards_region_list = NULL;
  5119       r->set_next_dirty_cards_region(NULL);
  5121     // now, redirty the cards of the survivor regions
  5122     // (it seemed faster to do it this way, instead of iterating over
  5123     // all regions and then clearing / dirtying as appropriate)
  5124     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  5127   double elapsed = os::elapsedTime() - start;
  5128   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  5129 #ifndef PRODUCT
  5130   if (G1VerifyCTCleanup || VerifyAfterGC) {
  5131     G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
  5132     heap_region_iterate(&cleanup_verifier);
  5134 #endif
  5137 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  5138   size_t pre_used = 0;
  5139   FreeRegionList local_free_list("Local List for CSet Freeing");
  5141   double young_time_ms     = 0.0;
  5142   double non_young_time_ms = 0.0;
  5144   // Since the collection set is a superset of the young list,
  5145   // all we need to do to clear the young list is clear its
  5146   // head and length, and unlink any young regions in the code below
  5147   _young_list->clear();
  5149   G1CollectorPolicy* policy = g1_policy();
  5151   double start_sec = os::elapsedTime();
  5152   bool non_young = true;
  5154   HeapRegion* cur = cs_head;
  5155   int age_bound = -1;
  5156   size_t rs_lengths = 0;
  5158   while (cur != NULL) {
  5159     assert(!is_on_free_list(cur), "sanity");
  5161     if (non_young) {
  5162       if (cur->is_young()) {
  5163         double end_sec = os::elapsedTime();
  5164         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5165         non_young_time_ms += elapsed_ms;
  5167         start_sec = os::elapsedTime();
  5168         non_young = false;
  5170     } else {
  5171       double end_sec = os::elapsedTime();
  5172       double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5173       young_time_ms += elapsed_ms;
  5175       start_sec = os::elapsedTime();
  5176       non_young = true;
  5179     rs_lengths += cur->rem_set()->occupied();
  5181     HeapRegion* next = cur->next_in_collection_set();
  5182     assert(cur->in_collection_set(), "bad CS");
  5183     cur->set_next_in_collection_set(NULL);
  5184     cur->set_in_collection_set(false);
  5186     if (cur->is_young()) {
  5187       int index = cur->young_index_in_cset();
  5188       guarantee( index != -1, "invariant" );
  5189       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
  5190       size_t words_survived = _surviving_young_words[index];
  5191       cur->record_surv_words_in_group(words_survived);
  5193       // At this point we have 'popped' cur from the collection set
  5194       // (linked via next_in_collection_set()) but it is still in the
  5195       // young list (linked via next_young_region()). Clear the
  5196       // _next_young_region field.
  5197       cur->set_next_young_region(NULL);
  5198     } else {
  5199       int index = cur->young_index_in_cset();
  5200       guarantee( index == -1, "invariant" );
  5203     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  5204             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5205             "invariant" );
  5207     if (!cur->evacuation_failed()) {
  5208       // And the region is non-empty.
  5209       assert(!cur->is_empty(), "Should not have empty regions in a CS.");
  5210       free_region(cur, &pre_used, &local_free_list, false /* par */);
  5211     } else {
  5212       cur->uninstall_surv_rate_group();
  5213       if (cur->is_young())
  5214         cur->set_young_index_in_cset(-1);
  5215       cur->set_not_young();
  5216       cur->set_evacuation_failed(false);
  5218     cur = next;
  5221   policy->record_max_rs_lengths(rs_lengths);
  5222   policy->cset_regions_freed();
  5224   double end_sec = os::elapsedTime();
  5225   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5226   if (non_young)
  5227     non_young_time_ms += elapsed_ms;
  5228   else
  5229     young_time_ms += elapsed_ms;
  5231   update_sets_after_freeing_regions(pre_used, &local_free_list,
  5232                                     NULL /* humongous_proxy_set */,
  5233                                     false /* par */);
  5234   policy->record_young_free_cset_time_ms(young_time_ms);
  5235   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
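
// [Editorial illustration, not part of the original file] The walk above
// splits the elapsed time into "young" and "non-young" buckets by restarting
// the stopwatch whenever the region kind changes along the collection-set
// list. The bucketing in isolation, with a hypothetical now() clock:
//
//   double now();   // seconds, monotonic; assumed to exist elsewhere
//
//   void split_cset_time(const bool* is_young, int n,
//                        double& young_ms, double& non_young_ms) {
//     bool charging_non_young = true;        // mirrors 'non_young = true'
//     double start = now();
//     for (int i = 0; i < n; i++) {
//       if (charging_non_young == is_young[i]) {  // kind changed: close bucket
//         (charging_non_young ? non_young_ms : young_ms) += (now() - start) * 1000.0;
//         start = now();
//         charging_non_young = !charging_non_young;
//       }
//     }
//     (charging_non_young ? non_young_ms : young_ms) += (now() - start) * 1000.0;
//   }
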
  5238 // This routine is similar to free_collection_set() above but does not record
  5239 // any policy statistics or update free lists; we are abandoning
  5240 // the current incremental collection set in preparation of a
  5241 // full collection. After the full GC we will start to build up
  5242 // the incremental collection set again.
  5243 // This is only called when we're doing a full collection
  5244 // and is immediately followed by the tearing down of the young list.
  5246 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  5247   HeapRegion* cur = cs_head;
  5249   while (cur != NULL) {
  5250     HeapRegion* next = cur->next_in_collection_set();
  5251     assert(cur->in_collection_set(), "bad CS");
  5252     cur->set_next_in_collection_set(NULL);
  5253     cur->set_in_collection_set(false);
  5254     cur->set_young_index_in_cset(-1);
  5255     cur = next;
  5259 void G1CollectedHeap::set_free_regions_coming() {
  5260   if (G1ConcRegionFreeingVerbose) {
  5261     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  5262                            "setting free regions coming");
  5265   assert(!free_regions_coming(), "pre-condition");
  5266   _free_regions_coming = true;
  5269 void G1CollectedHeap::reset_free_regions_coming() {
  5271     assert(free_regions_coming(), "pre-condition");
  5272     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5273     _free_regions_coming = false;
  5274     SecondaryFreeList_lock->notify_all();
  5277   if (G1ConcRegionFreeingVerbose) {
  5278     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
  5279                            "reset free regions coming");
  5283 void G1CollectedHeap::wait_while_free_regions_coming() {
  5284   // Most of the time we won't have to wait, so let's do a quick test
  5285   // first before we take the lock.
  5286   if (!free_regions_coming()) {
  5287     return;
  5290   if (G1ConcRegionFreeingVerbose) {
  5291     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  5292                            "waiting for free regions");
  5296     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5297     while (free_regions_coming()) {
  5298       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
  5302   if (G1ConcRegionFreeingVerbose) {
  5303     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
  5304                            "done waiting for free regions");
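
// [Editorial illustration, not part of the original file] The wait above
// does a cheap unlocked check first and only takes SecondaryFreeList_lock
// (and blocks) when free regions really are still coming, mirroring the
// notify_all() in reset_free_regions_coming(). The same pattern with
// standard primitives (the unlocked peek is a benign simplification, as in
// the original):
//
//   #include <condition_variable>
//   #include <mutex>
//
//   std::mutex              secondary_free_lock;
//   std::condition_variable secondary_free_cv;
//   bool                    regions_coming = false;
//
//   void wait_while_free_regions_coming_sketch() {
//     if (!regions_coming) return;                 // fast path, no lock taken
//     std::unique_lock<std::mutex> l(secondary_free_lock);
//     secondary_free_cv.wait(l, [] { return !regions_coming; });
//   }
//
//   void reset_free_regions_coming_sketch() {
//     { std::lock_guard<std::mutex> l(secondary_free_lock);
//       regions_coming = false; }
//     secondary_free_cv.notify_all();
//   }
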
  5308 size_t G1CollectedHeap::n_regions() {
  5309   return _hrs->length();
  5312 size_t G1CollectedHeap::max_regions() {
  5313   return
  5314     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  5315     HeapRegion::GrainBytes;
  5318 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  5319   assert(heap_lock_held_for_gc(),
  5320               "the heap lock should already be held by or for this thread");
  5321   _young_list->push_region(hr);
  5322   g1_policy()->set_region_short_lived(hr);
  5325 class NoYoungRegionsClosure: public HeapRegionClosure {
  5326 private:
  5327   bool _success;
  5328 public:
  5329   NoYoungRegionsClosure() : _success(true) { }
  5330   bool doHeapRegion(HeapRegion* r) {
  5331     if (r->is_young()) {
  5332       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  5333                              r->bottom(), r->end());
  5334       _success = false;
  5336     return false;
  5338   bool success() { return _success; }
  5339 };
  5341 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  5342   bool ret = _young_list->check_list_empty(check_sample);
  5344   if (check_heap) {
  5345     NoYoungRegionsClosure closure;
  5346     heap_region_iterate(&closure);
  5347     ret = ret && closure.success();
  5350   return ret;
  5353 void G1CollectedHeap::empty_young_list() {
  5354   assert(heap_lock_held_for_gc(),
  5355               "the heap lock should already be held by or for this thread");
  5356   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
  5358   _young_list->empty_list();
  5361 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  5362   bool no_allocs = true;
  5363   for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
  5364     HeapRegion* r = _gc_alloc_regions[ap];
  5365     no_allocs = r == NULL || r->saved_mark_at_top();
  5367   return no_allocs;
  5370 void G1CollectedHeap::retire_all_alloc_regions() {
  5371   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  5372     HeapRegion* r = _gc_alloc_regions[ap];
  5373     if (r != NULL) {
  5374       // Check for aliases.
  5375       bool has_processed_alias = false;
  5376       for (int i = 0; i < ap; ++i) {
  5377         if (_gc_alloc_regions[i] == r) {
  5378           has_processed_alias = true;
  5379           break;
  5382       if (!has_processed_alias) {
  5383         retire_alloc_region(r, false /* par */);
  5389 // Done at the start of full GC.
  5390 void G1CollectedHeap::tear_down_region_lists() {
  5391   _free_list.remove_all();
  5394 class RegionResetter: public HeapRegionClosure {
  5395   G1CollectedHeap* _g1h;
  5396   FreeRegionList _local_free_list;
  5398 public:
  5399   RegionResetter() : _g1h(G1CollectedHeap::heap()),
  5400                      _local_free_list("Local Free List for RegionResetter") { }
  5402   bool doHeapRegion(HeapRegion* r) {
  5403     if (r->continuesHumongous()) return false;
  5404     if (r->top() > r->bottom()) {
  5405       if (r->top() < r->end()) {
  5406         Copy::fill_to_words(r->top(),
  5407                           pointer_delta(r->end(), r->top()));
  5409     } else {
  5410       assert(r->is_empty(), "tautology");
  5411       _local_free_list.add_as_tail(r);
  5413     return false;
  5416   void update_free_lists() {
  5417     _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL,
  5418                                             false /* par */);
  5420 };
  5422 // Done at the end of full GC.
  5423 void G1CollectedHeap::rebuild_region_lists() {
  5424   // This needs to go at the end of the full GC.
  5425   RegionResetter rs;
  5426   heap_region_iterate(&rs);
  5427   rs.update_free_lists();
  5430 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  5431   _refine_cte_cl->set_concurrent(concurrent);
  5434 #ifdef ASSERT
  5436 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5437   HeapRegion* hr = heap_region_containing(p);
  5438   if (hr == NULL) {
  5439     return is_in_permanent(p);
  5440   } else {
  5441     return hr->is_in(p);
  5444 #endif // ASSERT
  5446 class VerifyRegionListsClosure : public HeapRegionClosure {
  5447 private:
  5448   HumongousRegionSet* _humongous_set;
  5449   FreeRegionList*     _free_list;
  5450   size_t              _region_count;
  5452 public:
  5453   VerifyRegionListsClosure(HumongousRegionSet* humongous_set,
  5454                            FreeRegionList* free_list) :
  5455     _humongous_set(humongous_set), _free_list(free_list),
  5456     _region_count(0) { }
  5458   size_t region_count()      { return _region_count;      }
  5460   bool doHeapRegion(HeapRegion* hr) {
  5461     _region_count += 1;
  5463     if (hr->continuesHumongous()) {
  5464       return false;
  5467     if (hr->is_young()) {
  5468       // TODO
  5469     } else if (hr->startsHumongous()) {
  5470       _humongous_set->verify_next_region(hr);
  5471     } else if (hr->is_empty()) {
  5472       _free_list->verify_next_region(hr);
  5474     return false;
  5476 };
  5478 void G1CollectedHeap::verify_region_sets() {
  5479   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  5481   // First, check the explicit lists.
  5482   _free_list.verify();
  5484     // Given that a concurrent operation might be adding regions to
  5485     // the secondary free list we have to take the lock before
  5486     // verifying it.
  5487     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5488     _secondary_free_list.verify();
  5490   _humongous_set.verify();
  5492   // If a concurrent region freeing operation is in progress it will
  5493   // be difficult to correctly attribute any free regions we come
  5494   // across to the correct free list given that they might belong to
  5495   // one of several (free_list, secondary_free_list, any local lists,
  5496   // etc.). So, if that's the case we will skip the rest of the
  5497   // verification operation. Alternatively, waiting for the concurrent
  5498   // operation to complete will have a non-trivial effect on the GC's
  5499   // operation (no concurrent operation will last longer than the
  5500   // interval between two calls to verification) and it might hide
  5501   // any issues that we would like to catch during testing.
  5502   if (free_regions_coming()) {
  5503     return;
  5507     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  5508     // Make sure we append the secondary_free_list on the free_list so
  5509     // that all free regions we will come across can be safely
  5510     // attributed to the free_list.
  5511     append_secondary_free_list();
  5514   // Finally, make sure that the region accounting in the lists is
  5515   // consistent with what we see in the heap.
  5516   _humongous_set.verify_start();
  5517   _free_list.verify_start();
  5519   VerifyRegionListsClosure cl(&_humongous_set, &_free_list);
  5520   heap_region_iterate(&cl);
  5522   _humongous_set.verify_end();
  5523   _free_list.verify_end();
