src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author       kamg
date         Mon, 10 Jan 2011 17:14:53 -0500
changeset    2445:7246a374a9f2
parent       2381:7c5250dbd584
child        2457:ffd725ff6943
permissions  -rw-r--r--

6458402: 3 jvmti tests fail with CMS and +ExplicitGCInvokesConcurrent
Summary: Make JvmtiGCMark safe to run non-safepoint and instrument CMS
Reviewed-by: ysr, dcubed

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/icBuffer.hpp"
    27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
    28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
    30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    31 #include "gc_implementation/g1/concurrentZFThread.hpp"
    32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    34 #include "gc_implementation/g1/g1MarkSweep.hpp"
    35 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    36 #include "gc_implementation/g1/g1RemSet.inline.hpp"
    37 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    38 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    39 #include "gc_implementation/g1/vm_operations_g1.hpp"
    40 #include "gc_implementation/shared/isGCActiveMark.hpp"
    41 #include "memory/gcLocker.inline.hpp"
    42 #include "memory/genOopClosures.inline.hpp"
    43 #include "memory/generationSpec.hpp"
    44 #include "oops/oop.inline.hpp"
    45 #include "oops/oop.pcgc.inline.hpp"
    46 #include "runtime/aprofiler.hpp"
    47 #include "runtime/vmThread.hpp"
    49 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
    51 // turn this on so that the contents of the young list (scan-only /
    52 // to-be-collected) are printed at "strategic" points before / during
    53 // / after the collection --- this is useful for debugging
    54 #define YOUNG_LIST_VERBOSE 0
    55 // CURRENT STATUS
    56 // This file is under construction.  Search for "FIXME".
    58 // INVARIANTS/NOTES
    59 //
    60 // All allocation activity covered by the G1CollectedHeap interface is
    61 // serialized by acquiring the HeapLock.  This happens in mem_allocate
    62 // and allocate_new_tlab, which are the "entry" points to the
    63 // allocation code from the rest of the JVM.  (Note that this does not
    64 // apply to TLAB allocation, which is not part of this interface: it
    65 // is done by clients of this interface.)
    67 // Local to this file.
    69 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    70   SuspendibleThreadSet* _sts;
    71   G1RemSet* _g1rs;
    72   ConcurrentG1Refine* _cg1r;
    73   bool _concurrent;
    74 public:
    75   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    76                               G1RemSet* g1rs,
    77                               ConcurrentG1Refine* cg1r) :
    78     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    79   {}
    80   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    81     bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
    82     // This path is executed by the concurrent refine or mutator threads,
    83     // concurrently, and so we do not care if card_ptr contains references
    84     // that point into the collection set.
    85     assert(!oops_into_cset, "should be");
    87     if (_concurrent && _sts->should_yield()) {
    88       // Caller will actually yield.
    89       return false;
    90     }
    91     // Otherwise, we finished successfully; return true.
    92     return true;
    93   }
    94   void set_concurrent(bool b) { _concurrent = b; }
    95 };
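       // Used by check_ct_logs_at_safepoint() below: clears each logged card
       // that falls within the heap and histograms the card values it sees,
       // so the log-clearing mechanism can be verified.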
    98 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
    99   int _calls;
   100   G1CollectedHeap* _g1h;
   101   CardTableModRefBS* _ctbs;
   102   int _histo[256];
   103 public:
   104   ClearLoggedCardTableEntryClosure() :
   105     _calls(0)
   106   {
   107     _g1h = G1CollectedHeap::heap();
   108     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   109     for (int i = 0; i < 256; i++) _histo[i] = 0;
   110   }
   111   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   112     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   113       _calls++;
   114       unsigned char* ujb = (unsigned char*)card_ptr;
   115       int ind = (int)(*ujb);
   116       _histo[ind]++;
   117       *card_ptr = -1;
   118     }
   119     return true;
   120   }
   121   int calls() { return _calls; }
   122   void print_histo() {
   123     gclog_or_tty->print_cr("Card table value histogram:");
   124     for (int i = 0; i < 256; i++) {
   125       if (_histo[i] != 0) {
   126         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
   127       }
   128     }
   129   }
   130 };
   132 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   133   int _calls;
   134   G1CollectedHeap* _g1h;
   135   CardTableModRefBS* _ctbs;
   136 public:
   137   RedirtyLoggedCardTableEntryClosure() :
   138     _calls(0)
   139   {
   140     _g1h = G1CollectedHeap::heap();
   141     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   142   }
   143   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   144     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   145       _calls++;
   146       *card_ptr = 0;
   147     }
   148     return true;
   149   }
   150   int calls() { return _calls; }
   151 };
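       // A stripped-down variant of the closure above: it unconditionally
       // re-dirties every card it is applied to and keeps no statistics.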
   153 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   154 public:
   155   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   156     *card_ptr = CardTableModRefBS::dirty_card_val();
   157     return true;
   158   }
   159 };
   161 YoungList::YoungList(G1CollectedHeap* g1h)
   162   : _g1h(g1h), _head(NULL),
   163     _length(0),
   164     _last_sampled_rs_lengths(0),
   165     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
   166 {
   167   guarantee( check_list_empty(false), "just making sure..." );
   168 }
   170 void YoungList::push_region(HeapRegion *hr) {
   171   assert(!hr->is_young(), "should not already be young");
   172   assert(hr->get_next_young_region() == NULL, "cause it should!");
   174   hr->set_next_young_region(_head);
   175   _head = hr;
   177   hr->set_young();
   178   double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
   179   ++_length;
   180 }
   182 void YoungList::add_survivor_region(HeapRegion* hr) {
   183   assert(hr->is_survivor(), "should be flagged as survivor region");
   184   assert(hr->get_next_young_region() == NULL, "cause it should!");
   186   hr->set_next_young_region(_survivor_head);
   187   if (_survivor_head == NULL) {
   188     _survivor_tail = hr;
   189   }
   190   _survivor_head = hr;
   192   ++_survivor_length;
   193 }
   195 void YoungList::empty_list(HeapRegion* list) {
   196   while (list != NULL) {
   197     HeapRegion* next = list->get_next_young_region();
   198     list->set_next_young_region(NULL);
   199     list->uninstall_surv_rate_group();
   200     list->set_not_young();
   201     list = next;
   202   }
   203 }
   205 void YoungList::empty_list() {
   206   assert(check_list_well_formed(), "young list should be well formed");
   208   empty_list(_head);
   209   _head = NULL;
   210   _length = 0;
   212   empty_list(_survivor_head);
   213   _survivor_head = NULL;
   214   _survivor_tail = NULL;
   215   _survivor_length = 0;
   217   _last_sampled_rs_lengths = 0;
   219   assert(check_list_empty(false), "just making sure...");
   220 }
   222 bool YoungList::check_list_well_formed() {
   223   bool ret = true;
   225   size_t length = 0;
   226   HeapRegion* curr = _head;
   227   HeapRegion* last = NULL;
   228   while (curr != NULL) {
   229     if (!curr->is_young()) {
   230       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   231                              "incorrectly tagged (y: %d, surv: %d)",
   232                              curr->bottom(), curr->end(),
   233                              curr->is_young(), curr->is_survivor());
   234       ret = false;
   235     }
   236     ++length;
   237     last = curr;
   238     curr = curr->get_next_young_region();
   239   }
   240   ret = ret && (length == _length);
   242   if (!ret) {
   243     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   244     gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
   245                            length, _length);
   246   }
   248   return ret;
   249 }
   251 bool YoungList::check_list_empty(bool check_sample) {
   252   bool ret = true;
   254   if (_length != 0) {
   255     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
   256                   _length);
   257     ret = false;
   258   }
   259   if (check_sample && _last_sampled_rs_lengths != 0) {
   260     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   261     ret = false;
   262   }
   263   if (_head != NULL) {
   264     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   265     ret = false;
   266   }
   267   if (!ret) {
   268     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   269   }
   271   return ret;
   272 }
   274 void
   275 YoungList::rs_length_sampling_init() {
   276   _sampled_rs_lengths = 0;
   277   _curr               = _head;
   278 }
   280 bool
   281 YoungList::rs_length_sampling_more() {
   282   return _curr != NULL;
   283 }
   285 void
   286 YoungList::rs_length_sampling_next() {
   287   assert( _curr != NULL, "invariant" );
   288   size_t rs_length = _curr->rem_set()->occupied();
   290   _sampled_rs_lengths += rs_length;
   292   // The current region may not yet have been added to the
   293   // incremental collection set (it gets added when it is
   294   // retired as the current allocation region).
   295   if (_curr->in_collection_set()) {
   296     // Update the collection set policy information for this region
   297     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
   298   }
   300   _curr = _curr->get_next_young_region();
   301   if (_curr == NULL) {
   302     _last_sampled_rs_lengths = _sampled_rs_lengths;
   303     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   304   }
   305 }
   307 void
   308 YoungList::reset_auxilary_lists() {
   309   guarantee( is_empty(), "young list should be empty" );
   310   assert(check_list_well_formed(), "young list should be well formed");
   312   // Add survivor regions to SurvRateGroup.
   313   _g1h->g1_policy()->note_start_adding_survivor_regions();
   314   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   316   for (HeapRegion* curr = _survivor_head;
   317        curr != NULL;
   318        curr = curr->get_next_young_region()) {
   319     _g1h->g1_policy()->set_region_survivors(curr);
   321     // The region is a non-empty survivor so let's add it to
   322     // the incremental collection set for the next evacuation
   323     // pause.
   324     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   325   }
   326   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   328   _head   = _survivor_head;
   329   _length = _survivor_length;
   330   if (_survivor_head != NULL) {
   331     assert(_survivor_tail != NULL, "cause it shouldn't be");
   332     assert(_survivor_length > 0, "invariant");
   333     _survivor_tail->set_next_young_region(NULL);
   334   }
   336   // Don't clear the survivor list handles until the start of
   337   // the next evacuation pause - we need it in order to re-tag
   338   // the survivor regions from this evacuation pause as 'young'
   339   // at the start of the next.
   341   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   343   assert(check_list_well_formed(), "young list should be well formed");
   344 }
   346 void YoungList::print() {
   347   HeapRegion* lists[] = {_head,   _survivor_head};
   348   const char* names[] = {"YOUNG", "SURVIVOR"};
   350   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   351     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   352     HeapRegion *curr = lists[list];
   353     if (curr == NULL)
   354       gclog_or_tty->print_cr("  empty");
   355     while (curr != NULL) {
   356       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
   357                              "age: %4d, y: %d, surv: %d",
   358                              curr->bottom(), curr->end(),
   359                              curr->top(),
   360                              curr->prev_top_at_mark_start(),
   361                              curr->next_top_at_mark_start(),
   362                              curr->top_at_conc_mark_count(),
   363                              curr->age_in_surv_rate_group_cond(),
   364                              curr->is_young(),
   365                              curr->is_survivor());
   366       curr = curr->get_next_young_region();
   367     }
   368   }
   370   gclog_or_tty->print_cr("");
   371 }
   373 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   374 {
   375   // Claim the right to put the region on the dirty cards region list
   376   // by installing a self pointer.
   377   HeapRegion* next = hr->get_next_dirty_cards_region();
   378   if (next == NULL) {
   379     HeapRegion* res = (HeapRegion*)
   380       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   381                           NULL);
   382     if (res == NULL) {
   383       HeapRegion* head;
   384       do {
   385         // Put the region to the dirty cards region list.
   386         head = _dirty_cards_region_list;
   387         next = (HeapRegion*)
   388           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   389         if (next == head) {
   390           assert(hr->get_next_dirty_cards_region() == hr,
   391                  "hr->get_next_dirty_cards_region() != hr");
   392           if (next == NULL) {
   393             // The last region in the list points to itself.
   394             hr->set_next_dirty_cards_region(hr);
   395           } else {
   396             hr->set_next_dirty_cards_region(next);
   397           }
   398         }
   399       } while (next != head);
   400     }
   401   }
   402 }
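       // Pop the head of the dirty cards region list with a CAS loop. The
       // last region points to itself (see push_dirty_cards_region() above),
       // so when the head's next pointer equals the head the list becomes
       // empty after this pop.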
   404 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   405 {
   406   HeapRegion* head;
   407   HeapRegion* hr;
   408   do {
   409     head = _dirty_cards_region_list;
   410     if (head == NULL) {
   411       return NULL;
   412     }
   413     HeapRegion* new_head = head->get_next_dirty_cards_region();
   414     if (head == new_head) {
   415       // The last region.
   416       new_head = NULL;
   417     }
   418     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   419                                           head);
   420   } while (hr != head);
   421   assert(hr != NULL, "invariant");
   422   hr->set_next_dirty_cards_region(NULL);
   423   return hr;
   424 }
   426 void G1CollectedHeap::stop_conc_gc_threads() {
   427   _cg1r->stop();
   428   _czft->stop();
   429   _cmThread->stop();
   430 }
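       // Debugging check of the card-table logging machinery: count the dirty
       // cards, clear every logged card, verify the table is clean, then
       // re-dirty the logged cards and check that the redirty count matches
       // the clear count.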
   433 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   434   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   435   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   437   // Count the dirty cards at the start.
   438   CountNonCleanMemRegionClosure count1(this);
   439   ct_bs->mod_card_iterate(&count1);
   440   int orig_count = count1.n();
   442   // First clear the logged cards.
   443   ClearLoggedCardTableEntryClosure clear;
   444   dcqs.set_closure(&clear);
   445   dcqs.apply_closure_to_all_completed_buffers();
   446   dcqs.iterate_closure_all_threads(false);
   447   clear.print_histo();
   449   // Now ensure that there are no dirty cards.
   450   CountNonCleanMemRegionClosure count2(this);
   451   ct_bs->mod_card_iterate(&count2);
   452   if (count2.n() != 0) {
   453     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   454                            count2.n(), orig_count);
   455   }
   456   guarantee(count2.n() == 0, "Card table should be clean.");
   458   RedirtyLoggedCardTableEntryClosure redirty;
   459   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   460   dcqs.apply_closure_to_all_completed_buffers();
   461   dcqs.iterate_closure_all_threads(false);
   462   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   463                          clear.calls(), orig_count);
   464   guarantee(redirty.calls() == clear.calls(),
   465             "Or else mechanism is broken.");
   467   CountNonCleanMemRegionClosure count3(this);
   468   ct_bs->mod_card_iterate(&count3);
   469   if (count3.n() != orig_count) {
   470     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   471                            orig_count, count3.n());
   472     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   473   }
   475   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   476 }
   478 // Private class members.
   480 G1CollectedHeap* G1CollectedHeap::_g1h;
   482 // Private methods.
   484 // Finds a HeapRegion that can be used to allocate a given size of block.
   487 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
   488                                                  bool do_expand,
   489                                                  bool zero_filled) {
   490   ConcurrentZFThread::note_region_alloc();
   491   HeapRegion* res = alloc_free_region_from_lists(zero_filled);
   492   if (res == NULL && do_expand) {
   493     expand(word_size * HeapWordSize);
   494     res = alloc_free_region_from_lists(zero_filled);
   495     assert(res == NULL ||
   496            (!res->isHumongous() &&
   497             (!zero_filled ||
   498              res->zero_fill_state() == HeapRegion::Allocated)),
   499            "Alloc Regions must be zero filled (and non-H)");
   500   }
   501   if (res != NULL) {
   502     if (res->is_empty()) {
   503       _free_regions--;
   504     }
   505     assert(!res->isHumongous() &&
   506            (!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
   507            err_msg("Non-young alloc Regions must be zero filled (and non-H):"
   508                    " res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
   509                    res->isHumongous(), zero_filled, res->zero_fill_state()));
   510     assert(!res->is_on_unclean_list(),
   511            "Alloc Regions must not be on the unclean list");
   512     if (G1PrintHeapRegions) {
   513       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
   514                              "top "PTR_FORMAT,
   515                              res->hrs_index(), res->bottom(), res->end(), res->top());
   516     }
   517   }
   518   return res;
   519 }
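       // Allocate a region for the given GC allocation purpose, expanding the
       // heap if needed, but only while the policy's per-purpose region limit
       // (max_regions(purpose)) has not been reached.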
   521 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
   522                                                          size_t word_size,
   523                                                          bool zero_filled) {
   524   HeapRegion* alloc_region = NULL;
   525   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   526     alloc_region = newAllocRegion_work(word_size, true, zero_filled);
   527     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   528       alloc_region->set_survivor();
   529     }
   530     ++_gc_alloc_region_counts[purpose];
   531   } else {
   532     g1_policy()->note_alloc_region_limit_reached(purpose);
   533   }
   534   return alloc_region;
   535 }
   537 // If could fit into free regions w/o expansion, try.
   538 // Otherwise, if can expand, do so.
   539 // Otherwise, if using ex regions might help, try with ex given back.
   540 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   541   assert_heap_locked_or_at_safepoint();
   542   assert(regions_accounted_for(), "Region leakage!");
   544   // We can't allocate humongous regions while cleanupComplete is
   545   // running, since some of the regions we find to be empty might not
   546   // yet be added to the unclean list. If we're already at a
   547   // safepoint, this call is unnecessary, not to mention wrong.
   548   if (!SafepointSynchronize::is_at_safepoint()) {
   549     wait_for_cleanup_complete();
   550   }
   552   size_t num_regions =
   553          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   555   // Special case if < one region???
   557   // Remember the ft size.
   558   size_t x_size = expansion_regions();
   560   HeapWord* res = NULL;
   561   bool eliminated_allocated_from_lists = false;
   563   // Can the allocation potentially fit in the free regions?
   564   if (free_regions() >= num_regions) {
   565     res = _hrs->obj_allocate(word_size);
   566   }
   567   if (res == NULL) {
   568     // Try expansion.
   569     size_t fs = _hrs->free_suffix();
   570     if (fs + x_size >= num_regions) {
   571       expand((num_regions - fs) * HeapRegion::GrainBytes);
   572       res = _hrs->obj_allocate(word_size);
   573       assert(res != NULL, "This should have worked.");
   574     } else {
   575       // Expansion won't help.  Are there enough free regions if we get rid
   576       // of reservations?
   577       size_t avail = free_regions();
   578       if (avail >= num_regions) {
   579         res = _hrs->obj_allocate(word_size);
   580         if (res != NULL) {
   581           remove_allocated_regions_from_lists();
   582           eliminated_allocated_from_lists = true;
   583         }
   584       }
   585     }
   586   }
   587   if (res != NULL) {
   588     // Increment by the number of regions allocated.
   589     // FIXME: Assumes regions all of size GrainBytes.
   590 #ifndef PRODUCT
   591     mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
   592                                            HeapRegion::GrainWords));
   593 #endif
   594     if (!eliminated_allocated_from_lists)
   595       remove_allocated_regions_from_lists();
   596     _summary_bytes_used += word_size * HeapWordSize;
   597     _free_regions -= num_regions;
   598     _num_humongous_regions += (int) num_regions;
   599   }
   600   assert(regions_accounted_for(), "Region Leakage");
   601   return res;
   602 }
   604 void
   605 G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
   606   // The cleanup operation might update _summary_bytes_used
   607   // concurrently with this method. So, right now, if we don't wait
   608   // for it to complete, updates to _summary_bytes_used might get
   609   // lost. This will be resolved in the near future when the operation
   610   // of the free region list is revamped as part of CR 6977804.
   611   wait_for_cleanup_complete();
   613   retire_cur_alloc_region_common(cur_alloc_region);
   614   assert(_cur_alloc_region == NULL, "post-condition");
   615 }
   617 // See the comment in the .hpp file about the locking protocol and
   618 // assumptions of this method (and other related ones).
   619 HeapWord*
   620 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
   621                                                        bool at_safepoint,
   622                                                        bool do_dirtying,
   623                                                        bool can_expand) {
   624   assert_heap_locked_or_at_safepoint();
   625   assert(_cur_alloc_region == NULL,
   626          "replace_cur_alloc_region_and_allocate() should only be called "
   627          "after retiring the previous current alloc region");
   628   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   629          "at_safepoint and is_at_safepoint() should be a tautology");
   630   assert(!can_expand || g1_policy()->can_expand_young_list(),
   631          "we should not call this method with can_expand == true if "
   632          "we are not allowed to expand the young gen");
   634   if (can_expand || !g1_policy()->is_young_list_full()) {
   635     if (!at_safepoint) {
   636       // The cleanup operation might update _summary_bytes_used
   637       // concurrently with this method. So, right now, if we don't
   638       // wait for it to complete, updates to _summary_bytes_used might
   639       // get lost. This will be resolved in the near future when the
   640       // operation of the free region list is revamped as part of
   641       // CR 6977804. If we're already at a safepoint, this call is
   642       // unnecessary, not to mention wrong.
   643       wait_for_cleanup_complete();
   644     }
   646     HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
   647                                                       false /* zero_filled */);
   648     if (new_cur_alloc_region != NULL) {
   649       assert(new_cur_alloc_region->is_empty(),
   650              "the newly-allocated region should be empty, "
   651              "as right now we only allocate new regions out of the free list");
   652       g1_policy()->update_region_num(true /* next_is_young */);
   653       _summary_bytes_used -= new_cur_alloc_region->used();
   654       set_region_short_lived_locked(new_cur_alloc_region);
   656       assert(!new_cur_alloc_region->isHumongous(),
   657              "Catch a regression of this bug.");
   659       // We need to ensure that the stores to _cur_alloc_region and,
   660       // subsequently, to top do not float above the setting of the
   661       // young type.
   662       OrderAccess::storestore();
   664       // Now allocate out of the new current alloc region. We could
   665       // have re-used allocate_from_cur_alloc_region() but its
   666       // operation is slightly different to what we need here. First,
   667       // allocate_from_cur_alloc_region() is only called outside a
   668       // safepoint and will always unlock the Heap_lock if it returns
   669       // a non-NULL result. Second, it assumes that the current alloc
   670       // region is what's already assigned in _cur_alloc_region. What
   671       // we want here is to actually do the allocation first before we
   672       // assign the new region to _cur_alloc_region. This ordering is
   673       // not currently important, but it will be essential when we
   674       // change the code to support CAS allocation in the future (see
   675       // CR 6994297).
   676       //
   677       // This allocate method does BOT updates and we don't need them in
   678       // the young generation. This will be fixed in the near future by
   679       // CR 6994297.
   680       HeapWord* result = new_cur_alloc_region->allocate(word_size);
   681       assert(result != NULL, "we just allocate out of an empty region "
   682              "so allocation should have been successful");
   683       assert(is_in(result), "result should be in the heap");
   685       _cur_alloc_region = new_cur_alloc_region;
   687       if (!at_safepoint) {
   688         Heap_lock->unlock();
   689       }
   691       // do the dirtying, if necessary, after we release the Heap_lock
   692       if (do_dirtying) {
   693         dirty_young_block(result, word_size);
   694       }
   695       return result;
   696     }
   697   }
   699   assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
   700          "alloc region, it should still be NULL");
   701   assert_heap_locked_or_at_safepoint();
   702   return NULL;
   703 }
   705 // See the comment in the .hpp file about the locking protocol and
   706 // assumptions of this method (and other related ones).
   707 HeapWord*
   708 G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   709   assert_heap_locked_and_not_at_safepoint();
   710   assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
   711          "used for humongous allocations");
   713   // We will loop while succeeded is false, which means that we tried
   714   // to do a collection, but the VM op did not succeed. So, when we
   715   // exit the loop, either one of the allocation attempts was
   716   // successful, or we succeeded in doing the VM op but it was
   717   // unable to allocate after the collection.
   718   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   719     bool succeeded = true;
   721     {
   722       // We may have concurrent cleanup working at the time. Wait for
   723       // it to complete. In the future we would probably want to make
   724       // the concurrent cleanup truly concurrent by decoupling it from
   725       // the allocation. This will happen in the near future as part
   726       // of CR 6977804 which will revamp the operation of the free
   727       // region list. The fact that wait_for_cleanup_complete() will
   728       // do a wait() means that we'll give up the Heap_lock. So, it's
   729       // possible that when we exit wait_for_cleanup_complete() we
   730       // might be able to allocate successfully (since somebody else
   731       // might have done a collection meanwhile). So, we'll attempt to
   732       // allocate again, just in case. When we make cleanup truly
   733       // concurrent with allocation, we should remove this allocation
   734       // attempt as it's redundant (we only reach here after an
   735       // allocation attempt has been unsuccessful).
   736       wait_for_cleanup_complete();
   737       HeapWord* result = attempt_allocation(word_size);
   738       if (result != NULL) {
   739         assert_heap_not_locked();
   740         return result;
   741       }
   742     }
   744     if (GC_locker::is_active_and_needs_gc()) {
   745       // We are locked out of GC because of the GC locker. We can
   746       // allocate a new region only if we can expand the young gen.
   748       if (g1_policy()->can_expand_young_list()) {
   749         // Yes, we are allowed to expand the young gen. Let's try to
   750         // allocate a new current alloc region.
   752         HeapWord* result =
   753           replace_cur_alloc_region_and_allocate(word_size,
   754                                                 false, /* at_safepoint */
   755                                                 true,  /* do_dirtying */
   756                                                 true   /* can_expand */);
   757         if (result != NULL) {
   758           assert_heap_not_locked();
   759           return result;
   760         }
   761       }
   762       // We could not expand the young gen further (or we could but we
   763       // failed to allocate a new region). We'll stall until the GC
   764       // locker forces a GC.
   766       // If this thread is not in a jni critical section, we stall
   767       // the requestor until the critical section has cleared and
   768       // GC allowed. When the critical section clears, a GC is
   769       // initiated by the last thread exiting the critical section; so
   770       // we retry the allocation sequence from the beginning of the loop,
   771       // rather than causing more, now probably unnecessary, GC attempts.
   772       JavaThread* jthr = JavaThread::current();
   773       assert(jthr != NULL, "sanity");
   774       if (!jthr->in_critical()) {
   775         MutexUnlocker mul(Heap_lock);
   776         GC_locker::stall_until_clear();
   778         // We'll then fall off the end of the ("if GC locker active")
   779         // if-statement and retry the allocation further down in the
   780         // loop.
   781       } else {
   782         if (CheckJNICalls) {
   783           fatal("Possible deadlock due to allocating while"
   784                 " in jni critical section");
   785         }
   786         return NULL;
   787       }
   788     } else {
   789       // We are not locked out. So, let's try to do a GC. The VM op
   790       // will retry the allocation before it completes.
   792       // Read the GC count while holding the Heap_lock
   793       unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   795       Heap_lock->unlock();
   797       HeapWord* result =
   798         do_collection_pause(word_size, gc_count_before, &succeeded);
   799       assert_heap_not_locked();
   800       if (result != NULL) {
   801         assert(succeeded, "the VM op should have succeeded");
   803         // Allocations that take place on VM operations do not do any
   804         // card dirtying and we have to do it here.
   805         dirty_young_block(result, word_size);
   806         return result;
   807       }
   809       Heap_lock->lock();
   810     }
   812     assert_heap_locked();
   814     // We can reach here when we were unsuccessful in doing a GC,
   815     // because another thread beat us to it, or because we were locked
   816     // out of GC due to the GC locker. In either case a new alloc
   817     // region might be available so we will retry the allocation.
   818     HeapWord* result = attempt_allocation(word_size);
   819     if (result != NULL) {
   820       assert_heap_not_locked();
   821       return result;
   822     }
   824     // So far our attempts to allocate failed. The only time we'll go
   825     // around the loop and try again is if we tried to do a GC and the
   826     // VM op that we tried to schedule was not successful because
   827     // another thread beat us to it. If that happened it's possible
   828     // that by the time we grabbed the Heap_lock again and tried to
   829     // allocate other threads filled up the young generation, which
   830     // means that the allocation attempt after the GC also failed. So,
   831     // it's worth trying to schedule another GC pause.
   832     if (succeeded) {
   833       break;
   834     }
   836     // Give a warning if we seem to be looping forever.
   837     if ((QueuedAllocationWarningCount > 0) &&
   838         (try_count % QueuedAllocationWarningCount == 0)) {
   839       warning("G1CollectedHeap::attempt_allocation_slow() "
   840               "retries %d times", try_count);
   841     }
   842   }
   844   assert_heap_locked();
   845   return NULL;
   846 }
   848 // See the comment in the .hpp file about the locking protocol and
   849 // assumptions of this method (and other related ones).
   850 HeapWord*
   851 G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   852                                               bool at_safepoint) {
   853   // This is the method that will allocate a humongous object. All
   854   // allocation paths that attempt to allocate a humongous object
   855   // should eventually reach here. Currently, the only paths are from
   856   // mem_allocate() and attempt_allocation_at_safepoint().
   857   assert_heap_locked_or_at_safepoint();
   858   assert(isHumongous(word_size), "attempt_allocation_humongous() "
   859          "should only be used for humongous allocations");
   860   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   861          "at_safepoint and is_at_safepoint() should be a tautology");
   863   HeapWord* result = NULL;
   865   // We will loop while succeeded is false, which means that we tried
   866   // to do a collection, but the VM op did not succeed. So, when we
   867   // exit the loop, either one of the allocation attempts was
   868   // successful, or we succeeded in doing the VM op but it was
   869   // unable to allocate after the collection.
   870   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   871     bool succeeded = true;
   873     // Given that humongous objects are not allocated in young
   874     // regions, we'll first try to do the allocation without doing a
   875     // collection hoping that there's enough space in the heap.
   876     result = humongous_obj_allocate(word_size);
   877     assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
   878            "catch a regression of this bug.");
   879     if (result != NULL) {
   880       if (!at_safepoint) {
   881         // If we're not at a safepoint, unlock the Heap_lock.
   882         Heap_lock->unlock();
   883       }
   884       return result;
   885     }
   887     // If we failed to allocate the humongous object, we should try to
   888     // do a collection pause (if we're allowed) in case it reclaims
   889     // enough space for the allocation to succeed after the pause.
   890     if (!at_safepoint) {
   891       // Read the GC count while holding the Heap_lock
   892       unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   894       // If we're allowed to do a collection, we're not at a
   895       // safepoint, so it is safe to unlock the Heap_lock.
   896       Heap_lock->unlock();
   898       result = do_collection_pause(word_size, gc_count_before, &succeeded);
   899       assert_heap_not_locked();
   900       if (result != NULL) {
   901         assert(succeeded, "the VM op should have succeeded");
   902         return result;
   903       }
   905       // If we get here, the VM operation either did not succeed
   906       // (i.e., another thread beat us to it) or it succeeded but
   907       // failed to allocate the object.
   909       // If we're allowed to do a collection, we're not at a
   910       // safepoint, so it is safe to lock the Heap_lock.
   911       Heap_lock->lock();
   912     }
   914     assert(result == NULL, "otherwise we should have exited the loop earlier");
   916     // So far our attempts to allocate failed. The only time we'll go
   917     // around the loop and try again is if we tried to do a GC and the
   918     // VM op that we tried to schedule was not successful because
   919     // another thread beat us to it. That way it's possible that some
   920     // space was freed up by the thread that successfully scheduled a
   921     // GC. So it's worth trying to allocate again.
   922     if (succeeded) {
   923       break;
   924     }
   926     // Give a warning if we seem to be looping forever.
   927     if ((QueuedAllocationWarningCount > 0) &&
   928         (try_count % QueuedAllocationWarningCount == 0)) {
   929       warning("G1CollectedHeap::attempt_allocation_humongous "
   930               "retries %d times", try_count);
   931     }
   932   }
   934   assert_heap_locked_or_at_safepoint();
   935   return NULL;
   936 }
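       // Allocation during a pause: try the current alloc region first
       // (unless the caller expects it to be NULL), retiring and replacing it
       // if that fails, or take the humongous path for large requests. No
       // card dirtying is done here; the thread that scheduled the pause is
       // responsible for it (see the comment in the body).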
   938 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   939                                            bool expect_null_cur_alloc_region) {
   940   assert_at_safepoint();
   941   assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
   942          err_msg("the current alloc region was unexpectedly found "
   943                  "to be non-NULL, cur alloc region: "PTR_FORMAT" "
   944                  "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
   945                  _cur_alloc_region, expect_null_cur_alloc_region, word_size));
   947   if (!isHumongous(word_size)) {
   948     if (!expect_null_cur_alloc_region) {
   949       HeapRegion* cur_alloc_region = _cur_alloc_region;
   950       if (cur_alloc_region != NULL) {
   951         // This allocate method does BOT updates and we don't need them in
   952         // the young generation. This will be fixed in the near future by
   953         // CR 6994297.
   954         HeapWord* result = cur_alloc_region->allocate(word_size);
   955         if (result != NULL) {
   956           assert(is_in(result), "result should be in the heap");
   958           // We will not do any dirtying here. This is guaranteed to be
   959           // called during a safepoint and the thread that scheduled the
   960           // pause will do the dirtying if we return a non-NULL result.
   961           return result;
   962         }
   964         retire_cur_alloc_region_common(cur_alloc_region);
   965       }
   966     }
   968     assert(_cur_alloc_region == NULL,
   969            "at this point we should have no cur alloc region");
   970     return replace_cur_alloc_region_and_allocate(word_size,
   971                                                  true, /* at_safepoint */
   972                                                  false /* do_dirtying */,
   973                                                  false /* can_expand */);
   974   } else {
   975     return attempt_allocation_humongous(word_size,
   976                                         true /* at_safepoint */);
   977   }
   979   ShouldNotReachHere();
   980 }
   982 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   983   assert_heap_not_locked_and_not_at_safepoint();
   984   assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
   986   Heap_lock->lock();
   988   // First attempt: try allocating out of the current alloc region or
   989   // after replacing the current alloc region.
   990   HeapWord* result = attempt_allocation(word_size);
   991   if (result != NULL) {
   992     assert_heap_not_locked();
   993     return result;
   994   }
   996   assert_heap_locked();
   998   // Second attempt: go into the even slower path where we might
   999   // try to schedule a collection.
  1000   result = attempt_allocation_slow(word_size);
  1001   if (result != NULL) {
  1002     assert_heap_not_locked();
  1003     return result;
  1004   }
  1006   assert_heap_locked();
  1007   Heap_lock->unlock();
  1008   return NULL;
  1009 }
  1011 HeapWord*
  1012 G1CollectedHeap::mem_allocate(size_t word_size,
  1013                               bool   is_noref,
  1014                               bool   is_tlab,
  1015                               bool*  gc_overhead_limit_was_exceeded) {
  1016   assert_heap_not_locked_and_not_at_safepoint();
  1017   assert(!is_tlab, "mem_allocate() this should not be called directly "
  1018          "to allocate TLABs");
  1020   // Loop until the allocation is satisfied,
  1021   // or unsatisfied after GC.
  1022   for (int try_count = 1; /* we'll return */; try_count += 1) {
  1023     unsigned int gc_count_before;
  1024     {
  1025       Heap_lock->lock();
  1027       if (!isHumongous(word_size)) {
  1028         // First attempt: try allocating out of the current alloc
  1029         // region or after replacing the current alloc region.
  1030         HeapWord* result = attempt_allocation(word_size);
  1031         if (result != NULL) {
  1032           assert_heap_not_locked();
  1033           return result;
  1034         }
  1036         assert_heap_locked();
  1038         // Second attempt: go into the even slower path where we might
  1039         // try to schedule a collection.
  1040         result = attempt_allocation_slow(word_size);
  1041         if (result != NULL) {
  1042           assert_heap_not_locked();
  1043           return result;
  1044         }
  1045       } else {
  1046         HeapWord* result = attempt_allocation_humongous(word_size,
  1047                                                      false /* at_safepoint */);
  1048         if (result != NULL) {
  1049           assert_heap_not_locked();
  1050           return result;
  1051         }
  1052       }
  1054       assert_heap_locked();
  1055       // Read the gc count while the heap lock is held.
  1056       gc_count_before = SharedHeap::heap()->total_collections();
  1057       // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
  1058       Heap_lock->unlock();
  1059     }
  1061     // Create the garbage collection operation...
  1062     VM_G1CollectForAllocation op(gc_count_before, word_size);
  1063     // ...and get the VM thread to execute it.
  1064     VMThread::execute(&op);
  1066     assert_heap_not_locked();
  1067     if (op.prologue_succeeded() && op.pause_succeeded()) {
  1068       // If the operation was successful we'll return the result even
  1069       // if it is NULL. If the allocation attempt failed immediately
  1070       // after a Full GC, it's unlikely we'll be able to allocate now.
  1071       HeapWord* result = op.result();
  1072       if (result != NULL && !isHumongous(word_size)) {
  1073         // Allocations that take place on VM operations do not do any
  1074         // card dirtying and we have to do it here. We only have to do
  1075         // this for non-humongous allocations, though.
  1076         dirty_young_block(result, word_size);
  1077       }
  1078       return result;
  1079     } else {
  1080       assert(op.result() == NULL,
  1081              "the result should be NULL if the VM op did not succeed");
  1082     }
  1084     // Give a warning if we seem to be looping forever.
  1085     if ((QueuedAllocationWarningCount > 0) &&
  1086         (try_count % QueuedAllocationWarningCount == 0)) {
  1087       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
  1088     }
  1089   }
  1091   ShouldNotReachHere();
  1092 }
  1094 void G1CollectedHeap::abandon_cur_alloc_region() {
  1095   if (_cur_alloc_region != NULL) {
  1096     // We're finished with the _cur_alloc_region.
  1097     if (_cur_alloc_region->is_empty()) {
  1098       _free_regions++;
  1099       free_region(_cur_alloc_region);
  1100     } else {
  1101       // As we're building (at least the young portion of) the collection
  1102       // set incrementally we'll add the current allocation region to
  1103       // the collection set here.
  1104       if (_cur_alloc_region->is_young()) {
  1105         g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
  1106       }
  1107       _summary_bytes_used += _cur_alloc_region->used();
  1108     }
  1109     _cur_alloc_region = NULL;
  1110   }
  1111 }
  1113 void G1CollectedHeap::abandon_gc_alloc_regions() {
  1114   // first, make sure that the GC alloc region list is empty (it should!)
  1115   assert(_gc_alloc_region_list == NULL, "invariant");
  1116   release_gc_alloc_regions(true /* totally */);
  1117 }
  1119 class PostMCRemSetClearClosure: public HeapRegionClosure {
  1120   ModRefBarrierSet* _mr_bs;
  1121 public:
  1122   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  1123   bool doHeapRegion(HeapRegion* r) {
  1124     r->reset_gc_time_stamp();
  1125     if (r->continuesHumongous())
  1126       return false;
  1127     HeapRegionRemSet* hrrs = r->rem_set();
  1128     if (hrrs != NULL) hrrs->clear();
  1129     // You might think here that we could clear just the cards
  1130     // corresponding to the used region.  But no: if we leave a dirty card
  1131     // in a region we might allocate into, then it would prevent that card
  1132     // from being enqueued, and cause it to be missed.
  1133     // Re: the performance cost: we shouldn't be doing full GC anyway!
  1134     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
  1135     return false;
  1136   }
  1137 };
  1140 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  1141   ModRefBarrierSet* _mr_bs;
  1142 public:
  1143   PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  1144   bool doHeapRegion(HeapRegion* r) {
  1145     if (r->continuesHumongous()) return false;
  1146     if (r->used_region().word_size() != 0) {
  1147       _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
  1148     }
  1149     return false;
  1150   }
  1151 };
  1153 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  1154   G1CollectedHeap*   _g1h;
  1155   UpdateRSOopClosure _cl;
  1156   int                _worker_i;
  1157 public:
  1158   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
  1159     _cl(g1->g1_rem_set(), worker_i),
  1160     _worker_i(worker_i),
  1161     _g1h(g1)
  1162   { }
  1164   bool doHeapRegion(HeapRegion* r) {
  1165     if (!r->continuesHumongous()) {
  1166       _cl.set_from(r);
  1167       r->oop_iterate(&_cl);
  1168     }
  1169     return false;
  1170   }
  1171 };
  1173 class ParRebuildRSTask: public AbstractGangTask {
  1174   G1CollectedHeap* _g1;
  1175 public:
  1176   ParRebuildRSTask(G1CollectedHeap* g1)
  1177     : AbstractGangTask("ParRebuildRSTask"),
  1178       _g1(g1)
  1179   { }
  1181   void work(int i) {
  1182     RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
  1183     _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
  1184                                          HeapRegion::RebuildRSClaimValue);
  1185   }
  1186 };
  1188 bool G1CollectedHeap::do_collection(bool explicit_gc,
  1189                                     bool clear_all_soft_refs,
  1190                                     size_t word_size) {
  1191   if (GC_locker::check_active_before_gc()) {
  1192     return false;
  1193   }
  1195   SvcGCMarker sgcm(SvcGCMarker::FULL);
  1196   ResourceMark rm;
  1198   if (PrintHeapAtGC) {
  1199     Universe::print_heap_before_gc();
  1200   }
  1202   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  1203   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
  1205   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
  1206                            collector_policy()->should_clear_all_soft_refs();
  1208   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
  1210   {
  1211     IsGCActiveMark x;
  1213     // Timing
  1214     bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
  1215     assert(!system_gc || explicit_gc, "invariant");
  1216     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  1217     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  1218     TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
  1219                 PrintGC, true, gclog_or_tty);
  1221     TraceMemoryManagerStats tms(true /* fullGC */);
  1223     double start = os::elapsedTime();
  1224     g1_policy()->record_full_collection_start();
  1226     gc_prologue(true);
  1227     increment_total_collections(true /* full gc */);
  1229     size_t g1h_prev_used = used();
  1230     assert(used() == recalculate_used(), "Should be equal");
  1232     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  1233       HandleMark hm;  // Discard invalid handles created during verification
  1234       prepare_for_verify();
  1235       gclog_or_tty->print(" VerifyBeforeGC:");
  1236       Universe::verify(true);
  1237     }
  1238     assert(regions_accounted_for(), "Region leakage!");
  1240     COMPILER2_PRESENT(DerivedPointerTable::clear());
  1242     // We want to discover references, but not process them yet.
  1243     // This mode is disabled in
  1244     // instanceRefKlass::process_discovered_references if the
  1245     // generation does some collection work, or
  1246     // instanceRefKlass::enqueue_discovered_references if the
  1247     // generation returns without doing any work.
  1248     ref_processor()->disable_discovery();
  1249     ref_processor()->abandon_partial_discovery();
  1250     ref_processor()->verify_no_references_recorded();
  1252     // Abandon current iterations of concurrent marking and concurrent
  1253     // refinement, if any are in progress.
  1254     concurrent_mark()->abort();
  1256     // Make sure we'll choose a new allocation region afterwards.
  1257     abandon_cur_alloc_region();
  1258     abandon_gc_alloc_regions();
  1259     assert(_cur_alloc_region == NULL, "Invariant.");
  1260     g1_rem_set()->cleanupHRRS();
  1261     tear_down_region_lists();
  1262     set_used_regions_to_need_zero_fill();
  1264     // We may have added regions to the current incremental collection
  1265     // set between the last GC or pause and now. We need to clear the
  1266     // incremental collection set and then start rebuilding it afresh
  1267     // after this full GC.
  1268     abandon_collection_set(g1_policy()->inc_cset_head());
  1269     g1_policy()->clear_incremental_cset();
  1270     g1_policy()->stop_incremental_cset_building();
  1272     if (g1_policy()->in_young_gc_mode()) {
  1273       empty_young_list();
  1274       g1_policy()->set_full_young_gcs(true);
  1275     }
  1277     // See the comment in G1CollectedHeap::ref_processing_init() about
  1278     // how reference processing currently works in G1.
  1280     // Temporarily make reference _discovery_ single threaded (non-MT).
  1281     ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
  1283     // Temporarily make refs discovery atomic
  1284     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
  1286     // Temporarily clear _is_alive_non_header
  1287     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
  1289     ref_processor()->enable_discovery();
  1290     ref_processor()->setup_policy(do_clear_all_soft_refs);
  1292     // Do collection work
  1293     {
  1294       HandleMark hm;  // Discard invalid handles created during gc
  1295       G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
  1296     }
  1297     // Because freeing humongous regions may have added some unclean
  1298     // regions, it is necessary to tear down again before rebuilding.
  1299     tear_down_region_lists();
  1300     rebuild_region_lists();
  1302     _summary_bytes_used = recalculate_used();
  1304     ref_processor()->enqueue_discovered_references();
  1306     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1308     MemoryService::track_memory_usage();
  1310     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  1311       HandleMark hm;  // Discard invalid handles created during verification
  1312       gclog_or_tty->print(" VerifyAfterGC:");
  1313       prepare_for_verify();
  1314       Universe::verify(false);
  1315     }
  1316     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  1318     reset_gc_time_stamp();
  1319     // Since everything potentially moved, we will clear all remembered
  1320     // sets, and clear all cards.  Later we will rebuild remembered
  1321     // sets. We will also reset the GC time stamps of the regions.
  1322     PostMCRemSetClearClosure rs_clear(mr_bs());
  1323     heap_region_iterate(&rs_clear);
  1325     // Resize the heap if necessary.
  1326     resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  1328     if (_cg1r->use_cache()) {
  1329       _cg1r->clear_and_record_card_counts();
  1330       _cg1r->clear_hot_cache();
  1331     }
  1333     // Rebuild remembered sets of all regions.
  1335     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1336       ParRebuildRSTask rebuild_rs_task(this);
  1337       assert(check_heap_region_claim_values(
  1338              HeapRegion::InitialClaimValue), "sanity check");
  1339       set_par_threads(workers()->total_workers());
  1340       workers()->run_task(&rebuild_rs_task);
  1341       set_par_threads(0);
  1342       assert(check_heap_region_claim_values(
  1343              HeapRegion::RebuildRSClaimValue), "sanity check");
  1344       reset_heap_region_claim_values();
  1345     } else {
  1346       RebuildRSOutOfRegionClosure rebuild_rs(this);
  1347       heap_region_iterate(&rebuild_rs);
  1348     }
  1350     if (PrintGC) {
  1351       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  1352     }
  1354     if (true) { // FIXME
  1355       // Ask the permanent generation to adjust size for full collections
  1356       perm()->compute_new_size();
  1359     // Start a new incremental collection set for the next pause
  1360     assert(g1_policy()->collection_set() == NULL, "must be");
  1361     g1_policy()->start_incremental_cset_building();
  1363     // Clear the _cset_fast_test bitmap in anticipation of adding
  1364     // regions to the incremental collection set for the next
  1365     // evacuation pause.
  1366     clear_cset_fast_test();
  1368     double end = os::elapsedTime();
  1369     g1_policy()->record_full_collection_end();
  1371 #ifdef TRACESPINNING
  1372     ParallelTaskTerminator::print_termination_counts();
  1373 #endif
  1375     gc_epilogue(true);
  1377     // Discard all rset updates
  1378     JavaThread::dirty_card_queue_set().abandon_logs();
  1379     assert(!G1DeferredRSUpdate
  1380            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1381     assert(regions_accounted_for(), "Region leakage!");
  1384   if (g1_policy()->in_young_gc_mode()) {
  1385     _young_list->reset_sampled_info();
  1386     // At this point there should be no regions in the
  1387     // entire heap tagged as young.
  1388     assert( check_young_list_empty(true /* check_heap */),
  1389             "young list should be empty at this point");
  1392   // Update the number of full collections that have been completed.
  1393   increment_full_collections_completed(false /* concurrent */);
  1395   if (PrintHeapAtGC) {
  1396     Universe::print_heap_after_gc();
  1399   return true;
  1402 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1403   // do_collection() will return whether it succeeded in performing
  1404   // the GC. Currently, there is no facility on the
  1405   // do_full_collection() API to notify the caller that the collection
  1406   // did not succeed (e.g., because it was locked out by the GC
  1407   // locker). So, right now, we'll ignore the return value.
  1408   bool dummy = do_collection(true,                /* explicit_gc */
  1409                              clear_all_soft_refs,
  1410                              0                    /* word_size */);
  1413 // This code is mostly copied from TenuredGeneration.
  1414 void
  1415 G1CollectedHeap::
  1416 resize_if_necessary_after_full_collection(size_t word_size) {
  1417   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1419   // Include the current allocation, if any, and bytes that will be
  1420   // pre-allocated to support collections, as "used".
  1421   const size_t used_after_gc = used();
  1422   const size_t capacity_after_gc = capacity();
  1423   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1425   // This is enforced in arguments.cpp.
  1426   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
  1427          "otherwise the code below doesn't make sense");
  1429   // We don't have floating point command-line arguments
  1430   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
  1431   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1432   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
  1433   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1435   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
  1436   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
  1438   // We have to be careful here as these two calculations can overflow
  1439   // 32-bit size_t's.
  1440   double used_after_gc_d = (double) used_after_gc;
  1441   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
  1442   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
  1444   // Let's make sure that they are both under the max heap size, which
  1445   // by default will make them fit into a size_t.
  1446   double desired_capacity_upper_bound = (double) max_heap_size;
  1447   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
  1448                                     desired_capacity_upper_bound);
  1449   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
  1450                                     desired_capacity_upper_bound);
  1452   // We can now safely turn them into size_t's.
  1453   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
  1454   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
  1456   // This assert only makes sense here, before we adjust them
  1457   // with respect to the min and max heap size.
  1458   assert(minimum_desired_capacity <= maximum_desired_capacity,
  1459          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
  1460                  "maximum_desired_capacity = "SIZE_FORMAT,
  1461                  minimum_desired_capacity, maximum_desired_capacity));
  1463   // Should not be greater than the heap max size. No need to adjust
  1464   // it with respect to the heap min size as it's a lower bound (i.e.,
  1465   // we'll try to make the capacity larger than it, not smaller).
  1466   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
  1467   // Should not be less than the heap min size. No need to adjust it
  1468   // with respect to the heap max size as it's an upper bound (i.e.,
  1469   // we'll try to make the capacity smaller than it, not greater).
  1470   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
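         // A worked example with hypothetical flag values (not this VM's
         // defaults): with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
         // maximum_used_percentage is 0.60 and minimum_used_percentage is 0.30.
         // If used_after_gc is 600M, then minimum_desired_capacity is
         // 600M / 0.60 = 1000M and maximum_desired_capacity is
         // 600M / 0.30 = 2000M (assuming neither value is clamped by the
         // min/max heap sizes above).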
  1472   if (PrintGC && Verbose) {
  1473     const double free_percentage =
  1474       (double) free_after_gc / (double) capacity_after_gc;
  1475     gclog_or_tty->print_cr("Computing new size after full GC ");
  1476     gclog_or_tty->print_cr("  "
  1477                            "  minimum_free_percentage: %6.2f",
  1478                            minimum_free_percentage);
  1479     gclog_or_tty->print_cr("  "
  1480                            "  maximum_free_percentage: %6.2f",
  1481                            maximum_free_percentage);
  1482     gclog_or_tty->print_cr("  "
  1483                            "  capacity: %6.1fK"
  1484                            "  minimum_desired_capacity: %6.1fK"
  1485                            "  maximum_desired_capacity: %6.1fK",
  1486                            (double) capacity_after_gc / (double) K,
  1487                            (double) minimum_desired_capacity / (double) K,
  1488                            (double) maximum_desired_capacity / (double) K);
  1489     gclog_or_tty->print_cr("  "
  1490                            "  free_after_gc: %6.1fK"
  1491                            "  used_after_gc: %6.1fK",
  1492                            (double) free_after_gc / (double) K,
  1493                            (double) used_after_gc / (double) K);
  1494     gclog_or_tty->print_cr("  "
  1495                            "   free_percentage: %6.2f",
  1496                            free_percentage);
  1498   if (capacity_after_gc < minimum_desired_capacity) {
  1499     // Capacity too small, compute expansion size
  1500     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1501     expand(expand_bytes);
  1502     if (PrintGC && Verbose) {
  1503       gclog_or_tty->print_cr("  "
  1504                              "  expanding:"
  1505                              "  max_heap_size: %6.1fK"
  1506                              "  minimum_desired_capacity: %6.1fK"
  1507                              "  expand_bytes: %6.1fK",
  1508                              (double) max_heap_size / (double) K,
  1509                              (double) minimum_desired_capacity / (double) K,
  1510                              (double) expand_bytes / (double) K);
  1513     // No expansion, now see if we want to shrink
  1514   } else if (capacity_after_gc > maximum_desired_capacity) {
  1515     // Capacity too large, compute shrinking size
  1516     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1517     shrink(shrink_bytes);
  1518     if (PrintGC && Verbose) {
  1519       gclog_or_tty->print_cr("  "
  1520                              "  shrinking:"
  1521                              "  min_heap_size: %6.1fK"
  1522                              "  maximum_desired_capacity: %6.1fK"
  1523                              "  shrink_bytes: %6.1fK",
  1524                              (double) min_heap_size / (double) K,
  1525                              (double) maximum_desired_capacity / (double) K,
  1526                              (double) shrink_bytes / (double) K);
  1532 HeapWord*
  1533 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
  1534                                            bool* succeeded) {
  1535   assert(SafepointSynchronize::is_at_safepoint(),
  1536          "satisfy_failed_allocation() should only be called at a safepoint");
  1537   assert(Thread::current()->is_VM_thread(),
  1538          "satisfy_failed_allocation() should only be called by the VM thread");
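         // The retry sequence below is: (1) retry the allocation as is,
         // (2) expand the heap and retry, (3) do a Full GC (without clearing
         // all soft references) and retry, (4) do a Full GC that clears all
         // soft references and retry, and finally (5) give up and return NULL
         // (with *succeeded still true, since no GC was locked out).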
  1540   *succeeded = true;
  1541   // Let's attempt the allocation first.
  1542   HeapWord* result = attempt_allocation_at_safepoint(word_size,
  1543                                      false /* expect_null_cur_alloc_region */);
  1544   if (result != NULL) {
  1545     assert(*succeeded, "sanity");
  1546     return result;
  1549   // In a G1 heap, we're supposed to keep allocations from failing by
  1550   // using incremental pauses.  Therefore, at least for now, we'll favor
  1551   // expansion over collection.  (This might change in the future if we can
  1552   // do something smarter than full collection to satisfy a failed alloc.)
  1553   result = expand_and_allocate(word_size);
  1554   if (result != NULL) {
  1555     assert(*succeeded, "sanity");
  1556     return result;
  1559   // Expansion didn't work, we'll try to do a Full GC.
  1560   bool gc_succeeded = do_collection(false, /* explicit_gc */
  1561                                     false, /* clear_all_soft_refs */
  1562                                     word_size);
  1563   if (!gc_succeeded) {
  1564     *succeeded = false;
  1565     return NULL;
  1568   // Retry the allocation
  1569   result = attempt_allocation_at_safepoint(word_size,
  1570                                       true /* expect_null_cur_alloc_region */);
  1571   if (result != NULL) {
  1572     assert(*succeeded, "sanity");
  1573     return result;
  1576   // Then, try a Full GC that will collect all soft references.
  1577   gc_succeeded = do_collection(false, /* explicit_gc */
  1578                                true,  /* clear_all_soft_refs */
  1579                                word_size);
  1580   if (!gc_succeeded) {
  1581     *succeeded = false;
  1582     return NULL;
  1585   // Retry the allocation once more
  1586   result = attempt_allocation_at_safepoint(word_size,
  1587                                       true /* expect_null_cur_alloc_region */);
  1588   if (result != NULL) {
  1589     assert(*succeeded, "sanity");
  1590     return result;
  1593   assert(!collector_policy()->should_clear_all_soft_refs(),
  1594          "Flag should have been handled and cleared prior to this point");
  1596   // What else?  We might try synchronous finalization later.  If the total
  1597   // space available is large enough for the allocation, then a more
  1598   // complete compaction phase than we've tried so far might be
  1599   // appropriate.
  1600   assert(*succeeded, "sanity");
  1601   return NULL;
  1604 // Attempts to expand the heap sufficiently to support an allocation
  1605 // of the given "word_size".  If successful, performs the allocation
  1606 // and returns the address of the allocated block; otherwise returns
  1607 // "NULL".
  1609 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1610   assert(SafepointSynchronize::is_at_safepoint(),
  1611          "expand_and_allocate() should only be called at a safepoint");
  1612   assert(Thread::current()->is_VM_thread(),
  1613          "expand_and_allocate() should only be called by the VM thread");
  1615   size_t expand_bytes = word_size * HeapWordSize;
  1616   if (expand_bytes < MinHeapDeltaBytes) {
  1617     expand_bytes = MinHeapDeltaBytes;
  1619   expand(expand_bytes);
  1620   assert(regions_accounted_for(), "Region leakage!");
  1622   return attempt_allocation_at_safepoint(word_size,
  1623                                      false /* expect_null_cur_alloc_region */);
  1626 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  1627   size_t pre_used = 0;
  1628   size_t cleared_h_regions = 0;
  1629   size_t freed_regions = 0;
  1630   UncleanRegionList local_list;
  1631   free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
  1632                                     freed_regions, &local_list);
  1634   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  1635                           &local_list);
  1636   return pre_used;
  1639 void
  1640 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
  1641                                                    size_t& pre_used,
  1642                                                    size_t& cleared_h,
  1643                                                    size_t& freed_regions,
  1644                                                    UncleanRegionList* list,
  1645                                                    bool par) {
  1646   assert(!hr->continuesHumongous(), "should have filtered these out");
  1647   size_t res = 0;
  1648   if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
  1649       !hr->is_young()) {
  1650     if (G1PolicyVerbose > 0)
  1651       gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
  1652                              " during cleanup", hr, hr->used());
  1653     free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
  1657 // FIXME: both this and shrink could probably be more efficient by
  1658 // doing one "VirtualSpace::expand_by" call rather than several.
  1659 void G1CollectedHeap::expand(size_t expand_bytes) {
  1660   size_t old_mem_size = _g1_storage.committed_size();
  1661   // We expand by a minimum of 1K.
  1662   expand_bytes = MAX2(expand_bytes, (size_t)K);
  1663   size_t aligned_expand_bytes =
  1664     ReservedSpace::page_align_size_up(expand_bytes);
  1665   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1666                                        HeapRegion::GrainBytes);
  1667   expand_bytes = aligned_expand_bytes;
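         // Expansion proceeds one region (GrainBytes) at a time: commit the
         // next chunk of the reserved space, wrap it in a new HeapRegion, and
         // put that region on the free list if its memory is known to be
         // zeroed, or on the unclean list (to be zero-filled later) otherwise.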
  1668   while (expand_bytes > 0) {
  1669     HeapWord* base = (HeapWord*)_g1_storage.high();
  1670     // Commit more storage.
  1671     bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
  1672     if (!successful) {
  1673         expand_bytes = 0;
  1674     } else {
  1675       expand_bytes -= HeapRegion::GrainBytes;
  1676       // Expand the committed region.
  1677       HeapWord* high = (HeapWord*) _g1_storage.high();
  1678       _g1_committed.set_end(high);
  1679       // Create a new HeapRegion.
  1680       MemRegion mr(base, high);
  1681       bool is_zeroed = !_g1_max_committed.contains(base);
  1682       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
  1684       // Now update max_committed if necessary.
  1685       _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
  1687       // Add it to the HeapRegionSeq.
  1688       _hrs->insert(hr);
  1689       // Set the zero-fill state, according to whether it's already
  1690       // zeroed.
  1692         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  1693         if (is_zeroed) {
  1694           hr->set_zero_fill_complete();
  1695           put_free_region_on_list_locked(hr);
  1696         } else {
  1697           hr->set_zero_fill_needed();
  1698           put_region_on_unclean_list_locked(hr);
  1701       _free_regions++;
  1702       // And we used up an expansion region to create it.
  1703       _expansion_regions--;
  1704       // Tell the cardtable about it.
  1705       Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1706       // And the offset table as well.
  1707       _bot_shared->resize(_g1_committed.word_size());
  1710   if (Verbose && PrintGC) {
  1711     size_t new_mem_size = _g1_storage.committed_size();
  1712     gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
  1713                            old_mem_size/K, aligned_expand_bytes/K,
  1714                            new_mem_size/K);
  1718 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
  1720   size_t old_mem_size = _g1_storage.committed_size();
  1721   size_t aligned_shrink_bytes =
  1722     ReservedSpace::page_align_size_down(shrink_bytes);
  1723   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1724                                          HeapRegion::GrainBytes);
  1725   size_t num_regions_deleted = 0;
  1726   MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
  1728   assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1729   if (mr.byte_size() > 0)
  1730     _g1_storage.shrink_by(mr.byte_size());
  1731   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1733   _g1_committed.set_end(mr.start());
  1734   _free_regions -= num_regions_deleted;
  1735   _expansion_regions += num_regions_deleted;
  1737   // Tell the cardtable about it.
  1738   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1740   // And the offset table as well.
  1741   _bot_shared->resize(_g1_committed.word_size());
  1743   HeapRegionRemSet::shrink_heap(n_regions());
  1745   if (Verbose && PrintGC) {
  1746     size_t new_mem_size = _g1_storage.committed_size();
  1747     gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
  1748                            old_mem_size/K, aligned_shrink_bytes/K,
  1749                            new_mem_size/K);
  1753 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1754   release_gc_alloc_regions(true /* totally */);
  1755   tear_down_region_lists();  // We will rebuild them in a moment.
  1756   shrink_helper(shrink_bytes);
  1757   rebuild_region_lists();
  1760 // Public methods.
  1762 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1763 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1764 #endif // _MSC_VER
  1767 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1768   SharedHeap(policy_),
  1769   _g1_policy(policy_),
  1770   _dirty_card_queue_set(false),
  1771   _into_cset_dirty_card_queue_set(false),
  1772   _is_alive_closure(this),
  1773   _ref_processor(NULL),
  1774   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1775   _bot_shared(NULL),
  1776   _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  1777   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1778   _evac_failure_scan_stack(NULL) ,
  1779   _mark_in_progress(false),
  1780   _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  1781   _cur_alloc_region(NULL),
  1782   _refine_cte_cl(NULL),
  1783   _free_region_list(NULL), _free_region_list_size(0),
  1784   _free_regions(0),
  1785   _full_collection(false),
  1786   _unclean_region_list(),
  1787   _unclean_regions_coming(false),
  1788   _young_list(new YoungList(this)),
  1789   _gc_time_stamp(0),
  1790   _surviving_young_words(NULL),
  1791   _full_collections_completed(0),
  1792   _in_cset_fast_test(NULL),
  1793   _in_cset_fast_test_base(NULL),
  1794   _dirty_cards_region_list(NULL) {
  1795   _g1h = this; // To catch bugs.
  1796   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1797     vm_exit_during_initialization("Failed necessary allocation.");
  1800   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
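         // That is, an object is considered humongous if it needs at least
         // half a heap region; such objects bypass the regular allocation
         // path and get dedicated region(s) of their own.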
  1802   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1803   _task_queues = new RefToScanQueueSet(n_queues);
  1805   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1806   assert(n_rem_sets > 0, "Invariant.");
  1808   HeapRegionRemSetIterator** iter_arr =
  1809     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  1810   for (int i = 0; i < n_queues; i++) {
  1811     iter_arr[i] = new HeapRegionRemSetIterator();
  1813   _rem_set_iterator = iter_arr;
  1815   for (int i = 0; i < n_queues; i++) {
  1816     RefToScanQueue* q = new RefToScanQueue();
  1817     q->initialize();
  1818     _task_queues->register_queue(i, q);
  1821   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  1822     _gc_alloc_regions[ap]          = NULL;
  1823     _gc_alloc_region_counts[ap]    = 0;
  1824     _retained_gc_alloc_regions[ap] = NULL;
  1825     // by default, we do not retain a GC alloc region for each ap;
  1826     // we'll override this, when appropriate, below
  1827     _retain_gc_alloc_region[ap]    = false;
  1830   // We will try to remember the last half-full tenured region we
  1831   // allocated to at the end of a collection so that we can re-use it
  1832   // during the next collection.
  1833   _retain_gc_alloc_region[GCAllocForTenured]  = true;
  1835   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1838 jint G1CollectedHeap::initialize() {
  1839   CollectedHeap::pre_initialize();
  1840   os::enable_vtime();
  1842   // Necessary to satisfy locking discipline assertions.
  1844   MutexLocker x(Heap_lock);
  1846   // While there are no constraints in the GC code that HeapWordSize
  1847   // be any particular value, there are multiple other areas in the
  1848   // system which believe this to be true (e.g. oop->object_size in some
  1849   // cases incorrectly returns the size in wordSize units rather than
  1850   // HeapWordSize).
  1851   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1853   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1854   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1856   // Ensure that the sizes are properly aligned.
  1857   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1858   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1860   _cg1r = new ConcurrentG1Refine();
  1862   // Reserve the maximum.
  1863   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1864   // Includes the perm-gen.
  1866   const size_t total_reserved = max_byte_size + pgs->max_size();
  1867   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  1869   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
  1870                         HeapRegion::GrainBytes,
  1871                         false /*ism*/, addr);
  1873   if (UseCompressedOops) {
  1874     if (addr != NULL && !heap_rs.is_reserved()) {
  1875       // Failed to reserve at specified address - the requested memory
  1876       // region is taken already, for example, by 'java' launcher.
  1877       // Try again to reserve the heap higher.
  1878       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  1879       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  1880                              false /*ism*/, addr);
  1881       if (addr != NULL && !heap_rs0.is_reserved()) {
  1882         // Failed to reserve at specified address again - give up.
  1883         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  1884         assert(addr == NULL, "");
  1885         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  1886                                false /*ism*/, addr);
  1887         heap_rs = heap_rs1;
  1888       } else {
  1889         heap_rs = heap_rs0;
  1894   if (!heap_rs.is_reserved()) {
  1895     vm_exit_during_initialization("Could not reserve enough space for object heap");
  1896     return JNI_ENOMEM;
  1899   // It is important to do this in a way such that concurrent readers can't
  1900   // temporarily think something is in the heap.  (I've actually seen this
  1901   // happen in asserts: DLD.)
  1902   _reserved.set_word_size(0);
  1903   _reserved.set_start((HeapWord*)heap_rs.base());
  1904   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  1906   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  1908   _num_humongous_regions = 0;
  1910   // Create the gen rem set (and barrier set) for the entire reserved region.
  1911   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  1912   set_barrier_set(rem_set()->bs());
  1913   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  1914     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  1915   } else {
  1916     vm_exit_during_initialization("G1 requires a mod ref bs.");
  1917     return JNI_ENOMEM;
  1920   // Also create a G1 rem set.
  1921   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  1922     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  1923   } else {
  1924     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  1925     return JNI_ENOMEM;
  1928   // Carve out the G1 part of the heap.
  1930   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  1931   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  1932                            g1_rs.size()/HeapWordSize);
  1933   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  1935   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  1937   _g1_storage.initialize(g1_rs, 0);
  1938   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  1939   _g1_max_committed = _g1_committed;
  1940   _hrs = new HeapRegionSeq(_expansion_regions);
  1941   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  1942   guarantee(_cur_alloc_region == NULL, "from constructor");
  1944   // 6843694 - ensure that the maximum region index can fit
  1945   // in the remembered set structures.
  1946   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  1947   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
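         // For illustration: if RegionIdx_t is a 2-byte signed type, then
         // max_region_idx is (1 << 15) - 1 = 32767, i.e. the heap can contain
         // at most 32768 regions.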
  1949   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  1950   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  1951   guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
  1952             "too many cards per region");
  1954   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  1955                                              heap_word_size(init_byte_size));
  1957   _g1h = this;
  1959    _in_cset_fast_test_length = max_regions();
  1960    _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  1962    // We're biasing _in_cset_fast_test to avoid subtracting the
  1963    // beginning of the heap every time we want to index; basically
  1964    // it's the same as what we do with the card table.
  1965    _in_cset_fast_test = _in_cset_fast_test_base -
  1966                 ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
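          // With this bias, looking up an address addr that lies in the
          // reserved region amounts to (a sketch of the indexing, not new
          // code elsewhere in this file):
          //
          //   _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes]
          //
          // which is the same element as indexing _in_cset_fast_test_base by
          // the region index of addr.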
  1968    // Clear the _cset_fast_test bitmap in anticipation of adding
  1969    // regions to the incremental collection set for the first
  1970    // evacuation pause.
  1971    clear_cset_fast_test();
  1973   // Create the ConcurrentMark data structure and thread.
  1974   // (Must do this late, so that "max_regions" is defined.)
  1975   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  1976   _cmThread = _cm->cmThread();
  1978   // ...and the concurrent zero-fill thread, if necessary.
  1979   if (G1ConcZeroFill) {
  1980     _czft = new ConcurrentZFThread();
  1983   // Initialize the from_card cache structure of HeapRegionRemSet.
  1984   HeapRegionRemSet::init_heap(max_regions());
  1986   // Now expand into the initial heap size.
  1987   expand(init_byte_size);
  1989   // Perform any initialization actions delegated to the policy.
  1990   g1_policy()->init();
  1992   g1_policy()->note_start_of_mark_thread();
  1994   _refine_cte_cl =
  1995     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  1996                                     g1_rem_set(),
  1997                                     concurrent_g1_refine());
  1998   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  2000   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  2001                                                SATB_Q_FL_lock,
  2002                                                G1SATBProcessCompletedThreshold,
  2003                                                Shared_SATB_Q_lock);
  2005   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2006                                                 DirtyCardQ_FL_lock,
  2007                                                 concurrent_g1_refine()->yellow_zone(),
  2008                                                 concurrent_g1_refine()->red_zone(),
  2009                                                 Shared_DirtyCardQ_lock);
  2011   if (G1DeferredRSUpdate) {
  2012     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  2013                                       DirtyCardQ_FL_lock,
  2014                                       -1, // never trigger processing
  2015                                       -1, // no limit on length
  2016                                       Shared_DirtyCardQ_lock,
  2017                                       &JavaThread::dirty_card_queue_set());
  2020   // Initialize the card queue set used to hold cards containing
  2021   // references into the collection set.
  2022   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
  2023                                              DirtyCardQ_FL_lock,
  2024                                              -1, // never trigger processing
  2025                                              -1, // no limit on length
  2026                                              Shared_DirtyCardQ_lock,
  2027                                              &JavaThread::dirty_card_queue_set());
  2029   // In case we're keeping closure specialization stats, initialize those
  2030   // counts and that mechanism.
  2031   SpecializationStats::clear();
  2033   _gc_alloc_region_list = NULL;
  2035   // Do later initialization work for concurrent refinement.
  2036   _cg1r->init();
  2038   return JNI_OK;
  2041 void G1CollectedHeap::ref_processing_init() {
  2042   // Reference processing in G1 currently works as follows:
  2043   //
  2044   // * There is only one reference processor instance that
  2045   //   'spans' the entire heap. It is created by the code
  2046   //   below.
  2047   // * Reference discovery is not enabled during an incremental
  2048   //   pause (see 6484982).
  2049   // * Discovered refs are not enqueued nor are they processed
  2050   //   during an incremental pause (see 6484982).
  2051   // * Reference discovery is enabled at initial marking.
  2052   // * Reference discovery is disabled and the discovered
  2053   //   references processed etc during remarking.
  2054   // * Reference discovery is MT (see below).
  2055   // * Reference discovery requires a barrier (see below).
  2056   // * Reference processing is currently not MT (see 6608385).
  2057   // * A full GC enables (non-MT) reference discovery and
  2058   //   processes any discovered references.
  2060   SharedHeap::ref_processing_init();
  2061   MemRegion mr = reserved_region();
  2062   _ref_processor = ReferenceProcessor::create_ref_processor(
  2063                                          mr,    // span
  2064                                          false, // Reference discovery is not atomic
  2065                                          true,  // mt_discovery
  2066                                          &_is_alive_closure, // is alive closure
  2067                                                              // for efficiency
  2068                                          ParallelGCThreads,
  2069                                          ParallelRefProcEnabled,
  2070                                          true); // Setting next fields of discovered
  2071                                                 // lists requires a barrier.
  2074 size_t G1CollectedHeap::capacity() const {
  2075   return _g1_committed.byte_size();
  2078 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  2079                                                  DirtyCardQueue* into_cset_dcq,
  2080                                                  bool concurrent,
  2081                                                  int worker_i) {
  2082   // Clean cards in the hot card cache
  2083   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
  2085   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2086   int n_completed_buffers = 0;
  2087   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  2088     n_completed_buffers++;
  2090   g1_policy()->record_update_rs_processed_buffers(worker_i,
  2091                                                   (double) n_completed_buffers);
  2092   dcqs.clear_n_completed_buffers();
  2093   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  2097 // Computes the sum of the storage used by the various regions.
  2099 size_t G1CollectedHeap::used() const {
  2100   assert(Heap_lock->owner() != NULL,
  2101          "Should be owned on this thread's behalf.");
  2102   size_t result = _summary_bytes_used;
  2103   // Read only once in case it is set to NULL concurrently
  2104   HeapRegion* hr = _cur_alloc_region;
  2105   if (hr != NULL)
  2106     result += hr->used();
  2107   return result;
  2110 size_t G1CollectedHeap::used_unlocked() const {
  2111   size_t result = _summary_bytes_used;
  2112   return result;
  2115 class SumUsedClosure: public HeapRegionClosure {
  2116   size_t _used;
  2117 public:
  2118   SumUsedClosure() : _used(0) {}
  2119   bool doHeapRegion(HeapRegion* r) {
  2120     if (!r->continuesHumongous()) {
  2121       _used += r->used();
  2123     return false;
  2125   size_t result() { return _used; }
  2126 };
  2128 size_t G1CollectedHeap::recalculate_used() const {
  2129   SumUsedClosure blk;
  2130   _hrs->iterate(&blk);
  2131   return blk.result();
  2134 #ifndef PRODUCT
  2135 class SumUsedRegionsClosure: public HeapRegionClosure {
  2136   size_t _num;
  2137 public:
  2138   SumUsedRegionsClosure() : _num(0) {}
  2139   bool doHeapRegion(HeapRegion* r) {
  2140     if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
  2141       _num += 1;
  2143     return false;
  2145   size_t result() { return _num; }
  2146 };
  2148 size_t G1CollectedHeap::recalculate_used_regions() const {
  2149   SumUsedRegionsClosure blk;
  2150   _hrs->iterate(&blk);
  2151   return blk.result();
  2153 #endif // PRODUCT
  2155 size_t G1CollectedHeap::unsafe_max_alloc() {
  2156   if (_free_regions > 0) return HeapRegion::GrainBytes;
  2157   // otherwise, is there space in the current allocation region?
  2159   // We need to store the current allocation region in a local variable
  2160   // here. The problem is that this method doesn't take any locks and
  2161   // there may be other threads which overwrite the current allocation
  2162   // region field. attempt_allocation(), for example, sets it to NULL
  2163   // and this can happen *after* the NULL check here but before the call
  2164   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  2165   // to be a problem in the optimized build, since the two loads of the
  2166   // current allocation region field are optimized away.
  2167   HeapRegion* car = _cur_alloc_region;
  2169   // FIXME: should iterate over all regions?
  2170   if (car == NULL) {
  2171     return 0;
  2173   return car->free();
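       // Whether a given GC cause should trigger a concurrent cycle rather
       // than a stop-the-world Full GC: a System.gc() call with
       // -XX:+ExplicitGCInvokesConcurrent, or a GC-locker-induced collection
       // with -XX:+GCLockerInvokesConcurrent, is turned into an initial-mark
       // evacuation pause that starts a concurrent cycle (see collect() below).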
  2176 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  2177   return
  2178     ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
  2179      (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  2182 void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
  2183   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  2185   // We assume that if concurrent == true, then the caller is a
  2186   // concurrent thread that has joined the Suspendible Thread
  2187   // Set. If there's ever a cheap way to check this, we should add an
  2188   // assert here.
  2190   // We have already incremented _total_full_collections at the start
  2191   // of the GC, so total_full_collections() represents how many full
  2192   // collections have been started.
  2193   unsigned int full_collections_started = total_full_collections();
  2195   // Given that this method is called at the end of a Full GC or of a
  2196   // concurrent cycle, and those can be nested (i.e., a Full GC can
  2197   // interrupt a concurrent cycle), the number of full collections
  2198   // completed should be either one (in the case where there was no
  2199   // nesting) or two (when a Full GC interrupted a concurrent cycle)
  2200   // behind the number of full collections started.
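         // A worked example: suppose _full_collections_completed == N and a
         // concurrent cycle is in progress (collections started == N + 1) when
         // a Full GC interrupts it (started == N + 2). When the Full GC
         // finishes, this is called with concurrent == false and started is
         // 2 ahead of completed; when the concurrent cycle later finishes, it
         // is called with concurrent == true and started is 1 ahead of
         // completed. The asserts below check exactly these cases.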
  2202   // This is the case for the inner caller, i.e. a Full GC.
  2203   assert(concurrent ||
  2204          (full_collections_started == _full_collections_completed + 1) ||
  2205          (full_collections_started == _full_collections_completed + 2),
  2206          err_msg("for inner caller (Full GC): full_collections_started = %u "
  2207                  "is inconsistent with _full_collections_completed = %u",
  2208                  full_collections_started, _full_collections_completed));
  2210   // This is the case for the outer caller, i.e. the concurrent cycle.
  2211   assert(!concurrent ||
  2212          (full_collections_started == _full_collections_completed + 1),
  2213          err_msg("for outer caller (concurrent cycle): "
  2214                  "full_collections_started = %u "
  2215                  "is inconsistent with _full_collections_completed = %u",
  2216                  full_collections_started, _full_collections_completed));
  2218   _full_collections_completed += 1;
  2220   // We need to clear the "in_progress" flag in the CM thread before
  2221   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  2222   // is set) so that if a waiter requests another System.gc() it doesn't
  2223   // incorrectly see that a marking cycle is still in progress.
  2224   if (concurrent) {
  2225     _cmThread->clear_in_progress();
  2228   // This notify_all() will ensure that a thread that called
  2229   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
  2230   // and is waiting for a full GC to finish will be woken up. It is
  2231   // waiting in VM_G1IncCollectionPause::doit_epilogue().
  2232   FullGCCount_lock->notify_all();
  2235 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  2236   assert(Thread::current()->is_VM_thread(), "Precondition#1");
  2237   assert(Heap_lock->is_locked(), "Precondition#2");
  2238   GCCauseSetter gcs(this, cause);
  2239   switch (cause) {
  2240     case GCCause::_heap_inspection:
  2241     case GCCause::_heap_dump: {
  2242       HandleMark hm;
  2243       do_full_collection(false);         // don't clear all soft refs
  2244       break;
  2246     default: // XXX FIX ME
  2247       ShouldNotReachHere(); // Unexpected use of this function
  2251 void G1CollectedHeap::collect(GCCause::Cause cause) {
  2252   // The caller doesn't have the Heap_lock
  2253   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  2255   unsigned int gc_count_before;
  2256   unsigned int full_gc_count_before;
  2258     MutexLocker ml(Heap_lock);
  2260     // Don't want to do a GC until cleanup is completed. This
  2261     // limitation will be removed in the near future when the
  2262     // operation of the free region list is revamped as part of
  2263     // CR 6977804.
  2264     wait_for_cleanup_complete();
  2266     // Read the GC count while holding the Heap_lock
  2267     gc_count_before = SharedHeap::heap()->total_collections();
  2268     full_gc_count_before = SharedHeap::heap()->total_full_collections();
  2271   if (should_do_concurrent_full_gc(cause)) {
  2272     // Schedule an initial-mark evacuation pause that will start a
  2273     // concurrent cycle. We're setting word_size to 0 which means that
  2274     // we are not requesting a post-GC allocation.
  2275     VM_G1IncCollectionPause op(gc_count_before,
  2276                                0,     /* word_size */
  2277                                true,  /* should_initiate_conc_mark */
  2278                                g1_policy()->max_pause_time_ms(),
  2279                                cause);
  2280     VMThread::execute(&op);
  2281   } else {
  2282     if (cause == GCCause::_gc_locker
  2283         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
  2285       // Schedule a standard evacuation pause. We're setting word_size
  2286       // to 0 which means that we are not requesting a post-GC allocation.
  2287       VM_G1IncCollectionPause op(gc_count_before,
  2288                                  0,     /* word_size */
  2289                                  false, /* should_initiate_conc_mark */
  2290                                  g1_policy()->max_pause_time_ms(),
  2291                                  cause);
  2292       VMThread::execute(&op);
  2293     } else {
  2294       // Schedule a Full GC.
  2295       VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
  2296       VMThread::execute(&op);
  2301 bool G1CollectedHeap::is_in(const void* p) const {
  2302   if (_g1_committed.contains(p)) {
  2303     HeapRegion* hr = _hrs->addr_to_region(p);
  2304     return hr->is_in(p);
  2305   } else {
  2306     return _perm_gen->as_gen()->is_in(p);
  2310 // Iteration functions.
  2312 // Iterates an OopClosure over all ref-containing fields of objects
  2313 // within a HeapRegion.
  2315 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  2316   MemRegion _mr;
  2317   OopClosure* _cl;
  2318 public:
  2319   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  2320     : _mr(mr), _cl(cl) {}
  2321   bool doHeapRegion(HeapRegion* r) {
  2322     if (! r->continuesHumongous()) {
  2323       r->oop_iterate(_cl);
  2325     return false;
  2327 };
  2329 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  2330   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  2331   _hrs->iterate(&blk);
  2332   if (do_perm) {
  2333     perm_gen()->oop_iterate(cl);
  2337 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  2338   IterateOopClosureRegionClosure blk(mr, cl);
  2339   _hrs->iterate(&blk);
  2340   if (do_perm) {
  2341     perm_gen()->oop_iterate(cl);
  2345 // Iterates an ObjectClosure over all objects within a HeapRegion.
  2347 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  2348   ObjectClosure* _cl;
  2349 public:
  2350   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  2351   bool doHeapRegion(HeapRegion* r) {
  2352     if (! r->continuesHumongous()) {
  2353       r->object_iterate(_cl);
  2355     return false;
  2357 };
  2359 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  2360   IterateObjectClosureRegionClosure blk(cl);
  2361   _hrs->iterate(&blk);
  2362   if (do_perm) {
  2363     perm_gen()->object_iterate(cl);
  2367 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  2368   // FIXME: is this right?
  2369   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  2372 // Calls a SpaceClosure on a HeapRegion.
  2374 class SpaceClosureRegionClosure: public HeapRegionClosure {
  2375   SpaceClosure* _cl;
  2376 public:
  2377   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  2378   bool doHeapRegion(HeapRegion* r) {
  2379     _cl->do_space(r);
  2380     return false;
  2382 };
  2384 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  2385   SpaceClosureRegionClosure blk(cl);
  2386   _hrs->iterate(&blk);
  2389 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  2390   _hrs->iterate(cl);
  2393 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  2394                                                HeapRegionClosure* cl) {
  2395   _hrs->iterate_from(r, cl);
  2398 void
  2399 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  2400   _hrs->iterate_from(idx, cl);
  2403 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
  2405 void
  2406 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  2407                                                  int worker,
  2408                                                  jint claim_value) {
  2409   const size_t regions = n_regions();
  2410   const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
  2411   // try to spread out the starting points of the workers
  2412   const size_t start_index = regions / worker_num * (size_t) worker;
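         // For example (hypothetical numbers): with 1000 regions and 4
         // workers, workers 0..3 start claiming at regions 0, 250, 500 and
         // 750 respectively; each still visits all 1000 regions, wrapping
         // around modulo the region count.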
  2414   // each worker will actually look at all regions
  2415   for (size_t count = 0; count < regions; ++count) {
  2416     const size_t index = (start_index + count) % regions;
  2417     assert(0 <= index && index < regions, "sanity");
  2418     HeapRegion* r = region_at(index);
  2419     // we'll ignore "continues humongous" regions (we'll process them
  2420     // when we come across their corresponding "start humongous"
  2421     // region) and regions already claimed
  2422     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  2423       continue;
  2425     // OK, try to claim it
  2426     if (r->claimHeapRegion(claim_value)) {
  2427       // success!
  2428       assert(!r->continuesHumongous(), "sanity");
  2429       if (r->startsHumongous()) {
  2430         // If the region is "starts humongous" we'll iterate over its
  2431         // "continues humongous" regions first. The order is important:
  2432         // in one case, calling the closure on the "starts humongous"
  2433         // region might de-allocate and clear all its "continues
  2434         // humongous" regions and, as a result, we might end up
  2435         // processing them twice. So, we'll do them first (notice: most
  2436         // closures will ignore them anyway) and then we'll do the
  2437         // "starts humongous" region.
  2438         for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  2439           HeapRegion* chr = region_at(ch_index);
  2441           // if the region has already been claimed or it's not
  2442           // "continues humongous" we're done
  2443           if (chr->claim_value() == claim_value ||
  2444               !chr->continuesHumongous()) {
  2445             break;
  2448           // No one should have claimed it directly, given that we
  2449           // claimed its "starts humongous" region.
  2450           assert(chr->claim_value() != claim_value, "sanity");
  2451           assert(chr->humongous_start_region() == r, "sanity");
  2453           if (chr->claimHeapRegion(claim_value)) {
  2454             // we should always be able to claim it; no one else should
  2455             // be trying to claim this region
  2457             bool res2 = cl->doHeapRegion(chr);
  2458             assert(!res2, "Should not abort");
  2460             // Right now, this holds (i.e., no closure that actually
  2461             // does something with "continues humongous" regions
  2462             // clears them). We might have to weaken it in the future,
  2463             // but let's leave these two asserts here for extra safety.
  2464             assert(chr->continuesHumongous(), "should still be the case");
  2465             assert(chr->humongous_start_region() == r, "sanity");
  2466           } else {
  2467             guarantee(false, "we should not reach here");
  2472       assert(!r->continuesHumongous(), "sanity");
  2473       bool res = cl->doHeapRegion(r);
  2474       assert(!res, "Should not abort");
  2479 class ResetClaimValuesClosure: public HeapRegionClosure {
  2480 public:
  2481   bool doHeapRegion(HeapRegion* r) {
  2482     r->set_claim_value(HeapRegion::InitialClaimValue);
  2483     return false;
  2485 };
  2487 void
  2488 G1CollectedHeap::reset_heap_region_claim_values() {
  2489   ResetClaimValuesClosure blk;
  2490   heap_region_iterate(&blk);
  2493 #ifdef ASSERT
  2494 // This checks whether all regions in the heap have the correct claim
  2495 // value. It also piggy-backs a check to ensure that the
  2496 // humongous_start_region() information on "continues humongous"
  2497 // regions is correct.
  2499 class CheckClaimValuesClosure : public HeapRegionClosure {
  2500 private:
  2501   jint _claim_value;
  2502   size_t _failures;
  2503   HeapRegion* _sh_region;
  2504 public:
  2505   CheckClaimValuesClosure(jint claim_value) :
  2506     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  2507   bool doHeapRegion(HeapRegion* r) {
  2508     if (r->claim_value() != _claim_value) {
  2509       gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  2510                              "claim value = %d, should be %d",
  2511                              r->bottom(), r->end(), r->claim_value(),
  2512                              _claim_value);
  2513       ++_failures;
  2515     if (!r->isHumongous()) {
  2516       _sh_region = NULL;
  2517     } else if (r->startsHumongous()) {
  2518       _sh_region = r;
  2519     } else if (r->continuesHumongous()) {
  2520       if (r->humongous_start_region() != _sh_region) {
  2521         gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  2522                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  2523                                r->bottom(), r->end(),
  2524                                r->humongous_start_region(),
  2525                                _sh_region);
  2526         ++_failures;
  2529     return false;
  2531   size_t failures() {
  2532     return _failures;
  2534 };
  2536 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2537   CheckClaimValuesClosure cl(claim_value);
  2538   heap_region_iterate(&cl);
  2539   return cl.failures() == 0;
  2541 #endif // ASSERT
  2543 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2544   HeapRegion* r = g1_policy()->collection_set();
  2545   while (r != NULL) {
  2546     HeapRegion* next = r->next_in_collection_set();
  2547     if (cl->doHeapRegion(r)) {
  2548       cl->incomplete();
  2549       return;
  2551     r = next;
  2555 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2556                                                   HeapRegionClosure *cl) {
  2557   if (r == NULL) {
  2558     // The CSet is empty so there's nothing to do.
  2559     return;
  2562   assert(r->in_collection_set(),
  2563          "Start region must be a member of the collection set.");
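         // Iterate from r to the end of the collection set list, then wrap
         // around and iterate from the head of the list up to (but not
         // including) r, so that every region in the collection set is
         // visited exactly once.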
  2564   HeapRegion* cur = r;
  2565   while (cur != NULL) {
  2566     HeapRegion* next = cur->next_in_collection_set();
  2567     if (cl->doHeapRegion(cur) && false) {
  2568       cl->incomplete();
  2569       return;
  2571     cur = next;
  2573   cur = g1_policy()->collection_set();
  2574   while (cur != r) {
  2575     HeapRegion* next = cur->next_in_collection_set();
  2576     if (cl->doHeapRegion(cur) && false) {
  2577       cl->incomplete();
  2578       return;
  2580     cur = next;
  2584 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2585   return _hrs->length() > 0 ? _hrs->at(0) : NULL;
  2589 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2590   Space* res = heap_region_containing(addr);
  2591   if (res == NULL)
  2592     res = perm_gen()->space_containing(addr);
  2593   return res;
  2596 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2597   Space* sp = space_containing(addr);
  2598   if (sp != NULL) {
  2599     return sp->block_start(addr);
  2601   return NULL;
  2604 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2605   Space* sp = space_containing(addr);
  2606   assert(sp != NULL, "block_size of address outside of heap");
  2607   return sp->block_size(addr);
  2610 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2611   Space* sp = space_containing(addr);
  2612   return sp->block_is_obj(addr);
  2615 bool G1CollectedHeap::supports_tlab_allocation() const {
  2616   return true;
  2619 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2620   return HeapRegion::GrainBytes;
  2623 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2624   // Return the remaining space in the cur alloc region, but not less than
  2625   // the min TLAB size.
  2627   // Also, this value can be at most the humongous object threshold,
  2628   // since we can't allow tlabs to grow big enough to accommodate
  2629   // humongous objects.
  2631   // We need to store the cur alloc region locally, since it might change
  2632   // between when we test for NULL and when we use it later.
  2633   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  2634   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
  2636   if (cur_alloc_space == NULL) {
  2637     return max_tlab_size;
  2638   } else {
  2639     return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
  2640                 max_tlab_size);
  2644 bool G1CollectedHeap::allocs_are_zero_filled() {
  2645   return false;
  2648 size_t G1CollectedHeap::large_typearray_limit() {
  2649   // FIXME
  2650   return HeapRegion::GrainBytes/HeapWordSize;
  2653 size_t G1CollectedHeap::max_capacity() const {
  2654   return g1_reserved_obj_bytes();
  2657 jlong G1CollectedHeap::millis_since_last_gc() {
  2658   // assert(false, "NYI");
  2659   return 0;
  2663 void G1CollectedHeap::prepare_for_verify() {
  2664   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2665     ensure_parsability(false);
  2667   g1_rem_set()->prepare_for_verify();
  2670 class VerifyLivenessOopClosure: public OopClosure {
  2671   G1CollectedHeap* g1h;
  2672 public:
  2673   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
  2674     g1h = _g1h;
  2676   void do_oop(narrowOop *p) { do_oop_work(p); }
  2677   void do_oop(      oop *p) { do_oop_work(p); }
  2679   template <class T> void do_oop_work(T *p) {
  2680     oop obj = oopDesc::load_decode_heap_oop(p);
  2681     guarantee(obj == NULL || !g1h->is_obj_dead(obj),
  2682               "Dead object referenced by a not dead object");
  2684 };
  2686 class VerifyObjsInRegionClosure: public ObjectClosure {
  2687 private:
  2688   G1CollectedHeap* _g1h;
  2689   size_t _live_bytes;
  2690   HeapRegion *_hr;
  2691   bool _use_prev_marking;
  2692 public:
  2693   // use_prev_marking == true  -> use "prev" marking information,
  2694   // use_prev_marking == false -> use "next" marking information
  2695   VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
  2696     : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
  2697     _g1h = G1CollectedHeap::heap();
  2699   void do_object(oop o) {
  2700     VerifyLivenessOopClosure isLive(_g1h);
  2701     assert(o != NULL, "Huh?");
  2702     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
  2703       o->oop_iterate(&isLive);
  2704       if (!_hr->obj_allocated_since_prev_marking(o)) {
  2705         size_t obj_size = o->size();    // Make sure we don't overflow
  2706         _live_bytes += (obj_size * HeapWordSize);
  2710   size_t live_bytes() { return _live_bytes; }
  2711 };
  2713 class PrintObjsInRegionClosure : public ObjectClosure {
  2714   HeapRegion *_hr;
  2715   G1CollectedHeap *_g1;
  2716 public:
  2717   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  2718     _g1 = G1CollectedHeap::heap();
  2719   };
  2721   void do_object(oop o) {
  2722     if (o != NULL) {
  2723       HeapWord *start = (HeapWord *) o;
  2724       size_t word_sz = o->size();
  2725       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  2726                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  2727                           (void*) o, word_sz,
  2728                           _g1->isMarkedPrev(o),
  2729                           _g1->isMarkedNext(o),
  2730                           _hr->obj_allocated_since_prev_marking(o));
  2731       HeapWord *end = start + word_sz;
  2732       HeapWord *cur;
  2733       int *val;
  2734       for (cur = start; cur < end; cur++) {
  2735         val = (int *) cur;
  2736         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  2740 };
  2742 class VerifyRegionClosure: public HeapRegionClosure {
  2743 private:
  2744   bool _allow_dirty;
  2745   bool _par;
  2746   bool _use_prev_marking;
  2747   bool _failures;
  2748 public:
  2749   // use_prev_marking == true  -> use "prev" marking information,
  2750   // use_prev_marking == false -> use "next" marking information
  2751   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
  2752     : _allow_dirty(allow_dirty),
  2753       _par(par),
  2754       _use_prev_marking(use_prev_marking),
  2755       _failures(false) {}
  2757   bool failures() {
  2758     return _failures;
  2761   bool doHeapRegion(HeapRegion* r) {
  2762     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  2763               "Should be unclaimed at verify points.");
  2764     if (!r->continuesHumongous()) {
  2765       bool failures = false;
  2766       r->verify(_allow_dirty, _use_prev_marking, &failures);
  2767       if (failures) {
  2768         _failures = true;
  2769       } else {
  2770         VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
  2771         r->object_iterate(&not_dead_yet_cl);
  2772         if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
  2773           gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
  2774                                  "max_live_bytes "SIZE_FORMAT" "
  2775                                  "< calculated "SIZE_FORMAT,
  2776                                  r->bottom(), r->end(),
  2777                                  r->max_live_bytes(),
  2778                                  not_dead_yet_cl.live_bytes());
  2779           _failures = true;
  2783     return false; // continue the region iteration even if we hit a failure
  2785 };
  2787 class VerifyRootsClosure: public OopsInGenClosure {
  2788 private:
  2789   G1CollectedHeap* _g1h;
  2790   bool             _use_prev_marking;
  2791   bool             _failures;
  2792 public:
  2793   // use_prev_marking == true  -> use "prev" marking information,
  2794   // use_prev_marking == false -> use "next" marking information
  2795   VerifyRootsClosure(bool use_prev_marking) :
  2796     _g1h(G1CollectedHeap::heap()),
  2797     _use_prev_marking(use_prev_marking),
  2798     _failures(false) { }
  2800   bool failures() { return _failures; }
  2802   template <class T> void do_oop_nv(T* p) {
  2803     T heap_oop = oopDesc::load_heap_oop(p);
  2804     if (!oopDesc::is_null(heap_oop)) {
  2805       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2806       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
  2807         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  2808                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
  2809         obj->print_on(gclog_or_tty);
  2810         _failures = true;
  2815   void do_oop(oop* p)       { do_oop_nv(p); }
  2816   void do_oop(narrowOop* p) { do_oop_nv(p); }
  2817 };
  2819 // This is the task used for parallel heap verification.
  2821 class G1ParVerifyTask: public AbstractGangTask {
  2822 private:
  2823   G1CollectedHeap* _g1h;
  2824   bool _allow_dirty;
  2825   bool _use_prev_marking;
  2826   bool _failures;
  2828 public:
  2829   // use_prev_marking == true  -> use "prev" marking information,
  2830   // use_prev_marking == false -> use "next" marking information
  2831   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
  2832                   bool use_prev_marking) :
  2833     AbstractGangTask("Parallel verify task"),
  2834     _g1h(g1h),
  2835     _allow_dirty(allow_dirty),
  2836     _use_prev_marking(use_prev_marking),
  2837     _failures(false) { }
  2839   bool failures() {
  2840     return _failures;
  2843   void work(int worker_i) {
  2844     HandleMark hm;
  2845     VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
  2846     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
  2847                                           HeapRegion::ParVerifyClaimValue);
  2848     if (blk.failures()) {
  2849       _failures = true;
  2852 };
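       // The parallel verification above relies on the region claim-value
       // protocol: each worker scans the region sequence but only verifies
       // the regions it manages to claim by atomically installing its claim
       // value, so every region is checked exactly once. A minimal sketch of
       // the claiming idea (illustrative only, not the HotSpot primitive;
       // claim_word and the values passed in are hypothetical):
       //
       //   bool claim_region(volatile int* claim_word, int initial, int mine) {
       //     // Only the worker that wins the CAS from 'initial' to 'mine'
       //     // gets true back and goes on to verify the region.
       //     return Atomic::cmpxchg(mine, claim_word, initial) == initial;
       //   }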
  2854 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  2855   verify(allow_dirty, silent, /* use_prev_marking */ true);
  2858 void G1CollectedHeap::verify(bool allow_dirty,
  2859                              bool silent,
  2860                              bool use_prev_marking) {
  2861   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2862     if (!silent) { gclog_or_tty->print("roots "); }
  2863     VerifyRootsClosure rootsCl(use_prev_marking);
  2864     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
  2865     process_strong_roots(true,  // activate StrongRootsScope
  2866                          false,
  2867                          SharedHeap::SO_AllClasses,
  2868                          &rootsCl,
  2869                          &blobsCl,
  2870                          &rootsCl);
  2871     bool failures = rootsCl.failures();
  2872     rem_set()->invalidate(perm_gen()->used_region(), false);
  2873     if (!silent) { gclog_or_tty->print("heapRegions "); }
  2874     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2875       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2876              "sanity check");
  2878       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2879       int n_workers = workers()->total_workers();
  2880       set_par_threads(n_workers);
  2881       workers()->run_task(&task);
  2882       set_par_threads(0);
  2883       if (task.failures()) {
  2884         failures = true;
  2887       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  2888              "sanity check");
  2890       reset_heap_region_claim_values();
  2892       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2893              "sanity check");
  2894     } else {
  2895       VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
  2896       _hrs->iterate(&blk);
  2897       if (blk.failures()) {
  2898         failures = true;
  2901     if (!silent) gclog_or_tty->print("remset ");
  2902     rem_set()->verify();
  2904     if (failures) {
  2905       gclog_or_tty->print_cr("Heap:");
  2906       print_on(gclog_or_tty, true /* extended */);
  2907       gclog_or_tty->print_cr("");
  2908 #ifndef PRODUCT
  2909       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
  2910         concurrent_mark()->print_reachable("at-verification-failure",
  2911                                            use_prev_marking, false /* all */);
  2913 #endif
  2914       gclog_or_tty->flush();
  2916     guarantee(!failures, "there should not have been any failures");
  2917   } else {
  2918     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  2922 class PrintRegionClosure: public HeapRegionClosure {
  2923   outputStream* _st;
  2924 public:
  2925   PrintRegionClosure(outputStream* st) : _st(st) {}
  2926   bool doHeapRegion(HeapRegion* r) {
  2927     r->print_on(_st);
  2928     return false;
  2930 };
  2932 void G1CollectedHeap::print() const { print_on(tty); }
  2934 void G1CollectedHeap::print_on(outputStream* st) const {
  2935   print_on(st, PrintHeapAtGCExtended);
  2938 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
  2939   st->print(" %-20s", "garbage-first heap");
  2940   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  2941             capacity()/K, used_unlocked()/K);
  2942   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  2943             _g1_storage.low_boundary(),
  2944             _g1_storage.high(),
  2945             _g1_storage.high_boundary());
  2946   st->cr();
  2947   st->print("  region size " SIZE_FORMAT "K, ",
  2948             HeapRegion::GrainBytes/K);
  2949   size_t young_regions = _young_list->length();
  2950   st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  2951             young_regions, young_regions * HeapRegion::GrainBytes / K);
  2952   size_t survivor_regions = g1_policy()->recorded_survivor_regions();
  2953   st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
  2954             survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
  2955   st->cr();
  2956   perm()->as_gen()->print_on(st);
  2957   if (extended) {
  2958     st->cr();
  2959     print_on_extended(st);
  2963 void G1CollectedHeap::print_on_extended(outputStream* st) const {
  2964   PrintRegionClosure blk(st);
  2965   _hrs->iterate(&blk);
  2968 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  2969   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2970     workers()->print_worker_threads_on(st);
  2973   _cmThread->print_on(st);
  2974   st->cr();
  2976   _cm->print_worker_threads_on(st);
  2978   _cg1r->print_worker_threads_on(st);
  2980   _czft->print_on(st);
  2981   st->cr();
  2984 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  2985   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2986     workers()->threads_do(tc);
  2988   tc->do_thread(_cmThread);
  2989   _cg1r->threads_do(tc);
  2990   tc->do_thread(_czft);
  2993 void G1CollectedHeap::print_tracing_info() const {
  2994   // We'll overload this to mean "trace GC pause statistics."
  2995   if (TraceGen0Time || TraceGen1Time) {
  2996     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  2997     // to that.
  2998     g1_policy()->print_tracing_info();
  3000   if (G1SummarizeRSetStats) {
  3001     g1_rem_set()->print_summary_info();
  3003   if (G1SummarizeConcMark) {
  3004     concurrent_mark()->print_summary_info();
  3006   if (G1SummarizeZFStats) {
  3007     ConcurrentZFThread::print_summary_info();
  3009   g1_policy()->print_yg_surv_rate_info();
  3011   SpecializationStats::print();
  3015 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  3016   HeapRegion* hr = heap_region_containing(addr);
  3017   if (hr == NULL) {
  3018     return 0;
  3019   } else {
  3020     return 1;
  3024 G1CollectedHeap* G1CollectedHeap::heap() {
  3025   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  3026          "not a garbage-first heap");
  3027   return _g1h;
  3030 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  3031   // always_do_update_barrier = false;
  3032   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  3033   // Call allocation profiler
  3034   AllocationProfiler::iterate_since_last_gc();
  3035   // Fill TLAB's and such
  3036   ensure_parsability(true);
  3039 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  3040   // FIXME: what is this about?
  3041   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  3042   // is set.
  3043   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  3044                         "derived pointer present"));
  3045   // always_do_update_barrier = true;
  3048 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
  3049                                                unsigned int gc_count_before,
  3050                                                bool* succeeded) {
  3051   assert_heap_not_locked_and_not_at_safepoint();
  3052   g1_policy()->record_stop_world_start();
  3053   VM_G1IncCollectionPause op(gc_count_before,
  3054                              word_size,
  3055                              false, /* should_initiate_conc_mark */
  3056                              g1_policy()->max_pause_time_ms(),
  3057                              GCCause::_g1_inc_collection_pause);
  3058   VMThread::execute(&op);
  3060   HeapWord* result = op.result();
  3061   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
  3062   assert(result == NULL || ret_succeeded,
  3063          "the result should be NULL if the VM did not succeed");
  3064   *succeeded = ret_succeeded;
  3066   assert_heap_not_locked();
  3067   return result;
  3070 void
  3071 G1CollectedHeap::doConcurrentMark() {
  3072   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  3073   if (!_cmThread->in_progress()) {
  3074     _cmThread->set_started();
  3075     CGC_lock->notify();
  3079 class VerifyMarkedObjsClosure: public ObjectClosure {
  3080     G1CollectedHeap* _g1h;
  3081     public:
  3082     VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  3083     void do_object(oop obj) {
  3084       assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
  3085              "mark-and-sweep mark should agree with concurrent deadness");
  3087 };
  3089 void
  3090 G1CollectedHeap::checkConcurrentMark() {
  3091     VerifyMarkedObjsClosure verifycl(this);
  3092     //    MutexLockerEx x(getMarkBitMapLock(),
  3093     //              Mutex::_no_safepoint_check_flag);
  3094     object_iterate(&verifycl, false);
  3097 void G1CollectedHeap::do_sync_mark() {
  3098   _cm->checkpointRootsInitial();
  3099   _cm->markFromRoots();
  3100   _cm->checkpointRootsFinal(false);
  3103 // <NEW PREDICTION>
  3105 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
  3106                                                        bool young) {
  3107   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
  3110 void G1CollectedHeap::check_if_region_is_too_expensive(double
  3111                                                            predicted_time_ms) {
  3112   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
  3115 size_t G1CollectedHeap::pending_card_num() {
  3116   size_t extra_cards = 0;
  3117   JavaThread *curr = Threads::first();
  3118   while (curr != NULL) {
  3119     DirtyCardQueue& dcq = curr->dirty_card_queue();
  3120     extra_cards += dcq.size();
  3121     curr = curr->next();
  3123   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3124   size_t buffer_size = dcqs.buffer_size();
  3125   size_t buffer_num = dcqs.completed_buffers_num();
  3126   return buffer_size * buffer_num + extra_cards;
  3129 size_t G1CollectedHeap::max_pending_card_num() {
  3130   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  3131   size_t buffer_size = dcqs.buffer_size();
  3132   size_t buffer_num  = dcqs.completed_buffers_num();
  3133   int thread_num  = Threads::number_of_threads();
  3134   return (buffer_num + thread_num) * buffer_size;
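         // Putting the two computations above together with hypothetical
         // numbers: with a buffer size of 256 cards, 10 completed buffers and
         // two Java threads holding 30 and 50 cards in their partially filled
         // queues, pending_card_num() is 256 * 10 + 30 + 50 = 2640 cards,
         // while max_pending_card_num() conservatively assumes every thread's
         // buffer is full: (10 + 2) * 256 = 3072 cards.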
  3137 size_t G1CollectedHeap::cards_scanned() {
  3138   return g1_rem_set()->cardsScanned();
  3141 void
  3142 G1CollectedHeap::setup_surviving_young_words() {
  3143   guarantee( _surviving_young_words == NULL, "pre-condition" );
  3144   size_t array_length = g1_policy()->young_cset_length();
  3145   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  3146   if (_surviving_young_words == NULL) {
  3147     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  3148                           "Not enough space for young surv words summary.");
  3150   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  3151 #ifdef ASSERT
  3152   for (size_t i = 0;  i < array_length; ++i) {
  3153     assert( _surviving_young_words[i] == 0, "memset above" );
  3155 #endif // ASSERT
  3158 void
  3159 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  3160   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3161   size_t array_length = g1_policy()->young_cset_length();
  3162   for (size_t i = 0; i < array_length; ++i)
  3163     _surviving_young_words[i] += surv_young_words[i];
  3166 void
  3167 G1CollectedHeap::cleanup_surviving_young_words() {
  3168   guarantee( _surviving_young_words != NULL, "pre-condition" );
  3169   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
  3170   _surviving_young_words = NULL;
  3173 // </NEW PREDICTION>
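       // The _surviving_young_words machinery above follows a common
       // parallel-GC pattern: each GC worker accumulates its own per-region
       // counts and merges them into the global array under
       // ParGCRareEvent_lock. A minimal sketch, with hypothetical names
       // (local_words is a worker-private array of the same length):
       //
       //   void merge_surviving_words(size_t* global, const size_t* local_words,
       //                              size_t len, Monitor* lock) {
       //     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
       //     for (size_t i = 0; i < len; ++i) {
       //       global[i] += local_words[i];  // one short critical section per worker
       //     }
       //   }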
  3175 struct PrepareForRSScanningClosure : public HeapRegionClosure {
  3176   bool doHeapRegion(HeapRegion *r) {
  3177     r->rem_set()->set_iter_claimed(0);
  3178     return false;
  3180 };
  3182 #if TASKQUEUE_STATS
  3183 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  3184   st->print_raw_cr("GC Task Stats");
  3185   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  3186   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  3189 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
  3190   print_taskqueue_stats_hdr(st);
  3192   TaskQueueStats totals;
  3193   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3194   for (int i = 0; i < n; ++i) {
  3195     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
  3196     totals += task_queue(i)->stats;
  3198   st->print_raw("tot "); totals.print(st); st->cr();
  3200   DEBUG_ONLY(totals.verify());
  3203 void G1CollectedHeap::reset_taskqueue_stats() {
  3204   const int n = workers() != NULL ? workers()->total_workers() : 1;
  3205   for (int i = 0; i < n; ++i) {
  3206     task_queue(i)->stats.reset();
  3209 #endif // TASKQUEUE_STATS
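       // For reference, print_taskqueue_stats() above emits one row per GC
       // worker followed by a "tot" row that is the element-wise sum of the
       // per-worker TaskQueueStats; with, say, four parallel workers that is
       // rows 0 through 3 plus the totals row.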
  3211 bool
  3212 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  3213   if (GC_locker::check_active_before_gc()) {
  3214     return false;
  3217   SvcGCMarker sgcm(SvcGCMarker::MINOR);
  3218   ResourceMark rm;
  3220   if (PrintHeapAtGC) {
  3221     Universe::print_heap_before_gc();
  3225     // This call will decide whether this pause is an initial-mark
  3226     // pause. If it is, during_initial_mark_pause() will return true
  3227     // for the duration of this pause.
  3228     g1_policy()->decide_on_conc_mark_initiation();
  3230     char verbose_str[128];
  3231     sprintf(verbose_str, "GC pause ");
  3232     if (g1_policy()->in_young_gc_mode()) {
  3233       if (g1_policy()->full_young_gcs())
  3234         strcat(verbose_str, "(young)");
  3235       else
  3236         strcat(verbose_str, "(partial)");
  3238     if (g1_policy()->during_initial_mark_pause()) {
  3239       strcat(verbose_str, " (initial-mark)");
  3240       // We are about to start a marking cycle, so we increment the
  3241       // full collection counter.
  3242       increment_total_full_collections();
  3245     // if PrintGCDetails is on, we'll print long statistics information
  3246     // in the collector policy code, so let's not print this as the output
  3247     // is messy if we do.
  3248     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  3249     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3250     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  3252     TraceMemoryManagerStats tms(false /* fullGC */);
  3254     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  3255     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
  3256     guarantee(!is_gc_active(), "collection is not reentrant");
  3257     assert(regions_accounted_for(), "Region leakage!");
  3259     increment_gc_time_stamp();
  3261     if (g1_policy()->in_young_gc_mode()) {
  3262       assert(check_young_list_well_formed(),
  3263              "young list should be well formed");
  3266     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  3267       IsGCActiveMark x;
  3269       gc_prologue(false);
  3270       increment_total_collections(false /* full gc */);
  3272 #if G1_REM_SET_LOGGING
  3273       gclog_or_tty->print_cr("\nJust chose CS, heap:");
  3274       print();
  3275 #endif
  3277       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  3278         HandleMark hm;  // Discard invalid handles created during verification
  3279         prepare_for_verify();
  3280         gclog_or_tty->print(" VerifyBeforeGC:");
  3281         Universe::verify(false);
  3284       COMPILER2_PRESENT(DerivedPointerTable::clear());
  3286       // Please see comment in G1CollectedHeap::ref_processing_init()
  3287       // to see how reference processing currently works in G1.
  3288       //
  3289       // We want to turn off ref discovery, if necessary, and turn it back
  3290       // on again later if we do. XXX Dubious: why is discovery disabled?
  3291       bool was_enabled = ref_processor()->discovery_enabled();
  3292       if (was_enabled) ref_processor()->disable_discovery();
  3294       // Forget the current alloc region (we might even choose it to be part
  3295       // of the collection set!).
  3296       abandon_cur_alloc_region();
  3298       // The elapsed time induced by the start time below deliberately elides
  3299       // the possible verification above.
  3300       double start_time_sec = os::elapsedTime();
  3301       size_t start_used_bytes = used();
  3303 #if YOUNG_LIST_VERBOSE
  3304       gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  3305       _young_list->print();
  3306       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3307 #endif // YOUNG_LIST_VERBOSE
  3309       g1_policy()->record_collection_pause_start(start_time_sec,
  3310                                                  start_used_bytes);
  3312 #if YOUNG_LIST_VERBOSE
  3313       gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  3314       _young_list->print();
  3315 #endif // YOUNG_LIST_VERBOSE
  3317       if (g1_policy()->during_initial_mark_pause()) {
  3318         concurrent_mark()->checkpointRootsInitialPre();
  3320       save_marks();
  3322       // We must do this before any possible evacuation that should propagate
  3323       // marks.
  3324       if (mark_in_progress()) {
  3325         double start_time_sec = os::elapsedTime();
  3327         _cm->drainAllSATBBuffers();
  3328         double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  3329         g1_policy()->record_satb_drain_time(finish_mark_ms);
  3331       // Record the number of elements currently on the mark stack, so we
  3332       // only iterate over these.  (Since evacuation may add to the mark
  3333       // stack, doing more exposes race conditions.)  If no mark is in
  3334       // progress, this will be zero.
  3335       _cm->set_oops_do_bound();
  3337       assert(regions_accounted_for(), "Region leakage.");
  3339       if (mark_in_progress())
  3340         concurrent_mark()->newCSet();
  3342 #if YOUNG_LIST_VERBOSE
  3343       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  3344       _young_list->print();
  3345       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3346 #endif // YOUNG_LIST_VERBOSE
  3348       g1_policy()->choose_collection_set(target_pause_time_ms);
  3350       // Nothing to do if we were unable to choose a collection set.
  3351 #if G1_REM_SET_LOGGING
  3352       gclog_or_tty->print_cr("\nAfter pause, heap:");
  3353       print();
  3354 #endif
  3355       PrepareForRSScanningClosure prepare_for_rs_scan;
  3356       collection_set_iterate(&prepare_for_rs_scan);
  3358       setup_surviving_young_words();
  3360       // Set up the gc allocation regions.
  3361       get_gc_alloc_regions();
  3363       // Actually do the work...
  3364       evacuate_collection_set();
  3366       free_collection_set(g1_policy()->collection_set());
  3367       g1_policy()->clear_collection_set();
  3369       cleanup_surviving_young_words();
  3371       // Start a new incremental collection set for the next pause.
  3372       g1_policy()->start_incremental_cset_building();
  3374       // Clear the _cset_fast_test bitmap in anticipation of adding
  3375       // regions to the incremental collection set for the next
  3376       // evacuation pause.
  3377       clear_cset_fast_test();
  3379       if (g1_policy()->in_young_gc_mode()) {
  3380         _young_list->reset_sampled_info();
  3382         // Don't check the whole heap at this point as the
  3383         // GC alloc regions from this pause have been tagged
  3384         // as survivors and moved on to the survivor list.
  3385         // Survivor regions will fail the !is_young() check.
  3386         assert(check_young_list_empty(false /* check_heap */),
  3387                "young list should be empty");
  3389 #if YOUNG_LIST_VERBOSE
  3390         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  3391         _young_list->print();
  3392 #endif // YOUNG_LIST_VERBOSE
  3394         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  3395                                           _young_list->first_survivor_region(),
  3396                                           _young_list->last_survivor_region());
  3398         _young_list->reset_auxilary_lists();
  3401       if (evacuation_failed()) {
  3402         _summary_bytes_used = recalculate_used();
  3403       } else {
  3404         // The "used" of the collection set regions has already been subtracted
  3405         // when they were freed.  Add in the bytes evacuated.
  3406         _summary_bytes_used += g1_policy()->bytes_in_to_space();
  3409       if (g1_policy()->in_young_gc_mode() &&
  3410           g1_policy()->during_initial_mark_pause()) {
  3411         concurrent_mark()->checkpointRootsInitialPost();
  3412         set_marking_started();
  3413         // CAUTION: after the doConcurrentMark() call below,
  3414         // the concurrent marking thread(s) could be running
  3415         // concurrently with us. Make sure that anything after
  3416         // this point does not assume that we are the only GC thread
  3417         // running. Note: of course, the actual marking work will
  3418         // not start until the safepoint itself is released in
  3419         // ConcurrentGCThread::safepoint_desynchronize().
  3420         doConcurrentMark();
  3423 #if YOUNG_LIST_VERBOSE
  3424       gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  3425       _young_list->print();
  3426       g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  3427 #endif // YOUNG_LIST_VERBOSE
  3429       double end_time_sec = os::elapsedTime();
  3430       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  3431       g1_policy()->record_pause_time_ms(pause_time_ms);
  3432       g1_policy()->record_collection_pause_end();
  3434       assert(regions_accounted_for(), "Region leakage.");
  3436       MemoryService::track_memory_usage();
  3438       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  3439         HandleMark hm;  // Discard invalid handles created during verification
  3440         gclog_or_tty->print(" VerifyAfterGC:");
  3441         prepare_for_verify();
  3442         Universe::verify(false);
  3445       if (was_enabled) ref_processor()->enable_discovery();
  3448         size_t expand_bytes = g1_policy()->expansion_amount();
  3449         if (expand_bytes > 0) {
  3450           size_t bytes_before = capacity();
  3451           expand(expand_bytes);
  3455       if (mark_in_progress()) {
  3456         concurrent_mark()->update_g1_committed();
  3459 #ifdef TRACESPINNING
  3460       ParallelTaskTerminator::print_termination_counts();
  3461 #endif
  3463       gc_epilogue(false);
  3466     assert(verify_region_lists(), "Bad region lists.");
  3468     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  3469       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  3470       print_tracing_info();
  3471       vm_exit(-1);
  3475   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
  3476   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  3478   if (PrintHeapAtGC) {
  3479     Universe::print_heap_after_gc();
  3481   if (G1SummarizeRSetStats &&
  3482       (G1SummarizeRSetStatsPeriod > 0) &&
  3483       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  3484     g1_rem_set()->print_summary_info();
  3487   return true;
  3490 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
  3492   size_t gclab_word_size;
  3493   switch (purpose) {
  3494     case GCAllocForSurvived:
  3495       gclab_word_size = YoungPLABSize;
  3496       break;
  3497     case GCAllocForTenured:
  3498       gclab_word_size = OldPLABSize;
  3499       break;
  3500     default:
  3501       assert(false, "unknown GCAllocPurpose");
  3502       gclab_word_size = OldPLABSize;
  3503       break;
  3505   return gclab_word_size;
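         // Note that YoungPLABSize and OldPLABSize are expressed in heap
         // words, so e.g. -XX:YoungPLABSize=4096 asks for 4096-word survivor
         // PLABs (32 KB with 8-byte heap words).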
  3509 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  3510   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  3511   // make sure we don't call set_gc_alloc_region() multiple times on
  3512   // the same region
  3513   assert(r == NULL || !r->is_gc_alloc_region(),
  3514          "shouldn't already be a GC alloc region");
  3515   assert(r == NULL || !r->isHumongous(),
  3516          "humongous regions shouldn't be used as GC alloc regions");
  3518   HeapWord* original_top = NULL;
  3519   if (r != NULL)
  3520     original_top = r->top();
  3522   // We will want to record the used space in r as being there before gc.
  3523   // Once we install it as a GC alloc region it's eligible for allocation.
  3524   // So record it now and use it later.
  3525   size_t r_used = 0;
  3526   if (r != NULL) {
  3527     r_used = r->used();
  3529     if (G1CollectedHeap::use_parallel_gc_threads()) {
  3530       // need to take the lock to guard against two threads calling
  3531       // get_gc_alloc_region concurrently (very unlikely but...)
  3532       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  3533       r->save_marks();
  3536   HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  3537   _gc_alloc_regions[purpose] = r;
  3538   if (old_alloc_region != NULL) {
  3539     // Replace aliases too.
  3540     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3541       if (_gc_alloc_regions[ap] == old_alloc_region) {
  3542         _gc_alloc_regions[ap] = r;
  3546   if (r != NULL) {
  3547     push_gc_alloc_region(r);
  3548     if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
  3549       // We are using a region as a GC alloc region after it has been used
  3550       // as a mutator allocation region during the current marking cycle.
  3551       // The mutator-allocated objects are currently implicitly marked, but
  3552       // when we move hr->next_top_at_mark_start() forward at the end
  3553       // of the GC pause, they won't be.  We therefore mark all objects in
  3554       // the "gap".  We do this object-by-object, since marking densely
  3555       // does not currently work right with marking bitmap iteration.  This
  3556       // means we rely on TLAB filling at the start of pauses, and no
  3557       // "resuscitation" of filled TLAB's.  If we want to do this, we need
  3558       // to fix the marking bitmap iteration.
  3559       HeapWord* curhw = r->next_top_at_mark_start();
  3560       HeapWord* t = original_top;
  3562       while (curhw < t) {
  3563         oop cur = (oop)curhw;
  3564         // We'll assume parallel for generality.  This is rare code.
  3565         concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
  3566         curhw = curhw + cur->size();
  3568       assert(curhw == t, "Should have parsed correctly.");
  3570     if (G1PolicyVerbose > 1) {
  3571       gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
  3572                           "for survivors:", r->bottom(), original_top, r->end());
  3573       r->print();
  3575     g1_policy()->record_before_bytes(r_used);
  3579 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  3580   assert(Thread::current()->is_VM_thread() ||
  3581          par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
  3582   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  3583          "Precondition.");
  3584   hr->set_is_gc_alloc_region(true);
  3585   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  3586   _gc_alloc_region_list = hr;
  3589 #ifdef G1_DEBUG
  3590 class FindGCAllocRegion: public HeapRegionClosure {
  3591 public:
  3592   bool doHeapRegion(HeapRegion* r) {
  3593     if (r->is_gc_alloc_region()) {
  3594       gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
  3595                              r->hrs_index(), r->bottom());
  3597     return false;
  3599 };
  3600 #endif // G1_DEBUG
  3602 void G1CollectedHeap::forget_alloc_region_list() {
  3603   assert(Thread::current()->is_VM_thread(), "Precondition");
  3604   while (_gc_alloc_region_list != NULL) {
  3605     HeapRegion* r = _gc_alloc_region_list;
  3606     assert(r->is_gc_alloc_region(), "Invariant.");
  3607     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  3608     // newly allocated data in order to be able to apply deferred updates
  3609     // before the GC is done for verification purposes (i.e. to allow
  3610     // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
  3611     // collection.
  3612     r->ContiguousSpace::set_saved_mark();
  3613     _gc_alloc_region_list = r->next_gc_alloc_region();
  3614     r->set_next_gc_alloc_region(NULL);
  3615     r->set_is_gc_alloc_region(false);
  3616     if (r->is_survivor()) {
  3617       if (r->is_empty()) {
  3618         r->set_not_young();
  3619       } else {
  3620         _young_list->add_survivor_region(r);
  3623     if (r->is_empty()) {
  3624       ++_free_regions;
  3627 #ifdef G1_DEBUG
  3628   FindGCAllocRegion fa;
  3629   heap_region_iterate(&fa);
  3630 #endif // G1_DEBUG
  3634 bool G1CollectedHeap::check_gc_alloc_regions() {
  3635   // TODO: allocation regions check
  3636   return true;
  3639 void G1CollectedHeap::get_gc_alloc_regions() {
  3640   // First, let's check that the GC alloc region list is empty (it should be)
  3641   assert(_gc_alloc_region_list == NULL, "invariant");
  3643   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3644     assert(_gc_alloc_regions[ap] == NULL, "invariant");
  3645     assert(_gc_alloc_region_counts[ap] == 0, "invariant");
  3647     // Create new GC alloc regions.
  3648     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
  3649     _retained_gc_alloc_regions[ap] = NULL;
  3651     if (alloc_region != NULL) {
  3652       assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
  3654       // let's make sure that the GC alloc region is not tagged as such
  3655       // outside a GC operation
  3656       assert(!alloc_region->is_gc_alloc_region(), "sanity");
  3658       if (alloc_region->in_collection_set() ||
  3659           alloc_region->top() == alloc_region->end() ||
  3660           alloc_region->top() == alloc_region->bottom() ||
  3661           alloc_region->isHumongous()) {
  3662         // we will discard the current GC alloc region if
  3663         // * it's in the collection set (it can happen!),
  3664         // * it's already full (no point in using it),
  3665         // * it's empty (this means that it was emptied during
  3666         // a cleanup and it should be on the free list now), or
  3667         // * it's humongous (this means that it was emptied
  3668         // during a cleanup and was added to the free list, but
  3669         // has been subsequently used to allocate a humongous
  3670         // object that may be less than the region size).
  3672         alloc_region = NULL;
  3676     if (alloc_region == NULL) {
  3677       // we will get a new GC alloc region
  3678       alloc_region = newAllocRegionWithExpansion(ap, 0);
  3679     } else {
  3680       // the region was retained from the last collection
  3681       ++_gc_alloc_region_counts[ap];
  3682       if (G1PrintHeapRegions) {
  3683         gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
  3684                                "top "PTR_FORMAT,
  3685                                alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
  3689     if (alloc_region != NULL) {
  3690       assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
  3691       set_gc_alloc_region(ap, alloc_region);
  3694     assert(_gc_alloc_regions[ap] == NULL ||
  3695            _gc_alloc_regions[ap]->is_gc_alloc_region(),
  3696            "the GC alloc region should be tagged as such");
  3697     assert(_gc_alloc_regions[ap] == NULL ||
  3698            _gc_alloc_regions[ap] == _gc_alloc_region_list,
  3699            "the GC alloc region should be the same as the GC alloc list head");
  3701   // Set alternative regions for allocation purposes that have reached
  3702   // their limit.
  3703   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3704     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
  3705     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
  3706       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
  3709   assert(check_gc_alloc_regions(), "alloc regions messed up");
  3712 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  3713   // We keep a separate list of all regions that have been alloc regions in
  3714   // the current collection pause. Forget that now. This method will
  3715   // untag the GC alloc regions and tear down the GC alloc region
  3716   // list. It's desirable that no regions are tagged as GC alloc
  3717   // outside GCs.
  3719   forget_alloc_region_list();
  3721   // The current alloc regions contain objs that have survived
  3722   // collection. Make them no longer GC alloc regions.
  3723   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3724     HeapRegion* r = _gc_alloc_regions[ap];
  3725     _retained_gc_alloc_regions[ap] = NULL;
  3726     _gc_alloc_region_counts[ap] = 0;
  3728     if (r != NULL) {
  3729       // we retain nothing on _gc_alloc_regions between GCs
  3730       set_gc_alloc_region(ap, NULL);
  3732       if (r->is_empty()) {
  3733         // we didn't actually allocate anything in it; let's just put
  3734         // it on the free list
  3735         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  3736         r->set_zero_fill_complete();
  3737         put_free_region_on_list_locked(r);
  3738       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3739         // retain it so that we can use it at the beginning of the next GC
  3740         _retained_gc_alloc_regions[ap] = r;
  3746 #ifndef PRODUCT
  3747 // Useful for debugging
  3749 void G1CollectedHeap::print_gc_alloc_regions() {
  3750   gclog_or_tty->print_cr("GC alloc regions");
  3751   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3752     HeapRegion* r = _gc_alloc_regions[ap];
  3753     if (r == NULL) {
  3754       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
  3755     } else {
  3756       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
  3757                              ap, r->bottom(), r->used());
  3761 #endif // PRODUCT
  3763 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  3764   _drain_in_progress = false;
  3765   set_evac_failure_closure(cl);
  3766   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3769 void G1CollectedHeap::finalize_for_evac_failure() {
  3770   assert(_evac_failure_scan_stack != NULL &&
  3771          _evac_failure_scan_stack->length() == 0,
  3772          "Postcondition");
  3773   assert(!_drain_in_progress, "Postcondition");
  3774   delete _evac_failure_scan_stack;
  3775   _evac_failure_scan_stack = NULL;
  3780 // *** Sequential G1 Evacuation
  3782 class G1IsAliveClosure: public BoolObjectClosure {
  3783   G1CollectedHeap* _g1;
  3784 public:
  3785   G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3786   void do_object(oop p) { assert(false, "Do not call."); }
  3787   bool do_object_b(oop p) {
  3788     // It is reachable if it is outside the collection set, or is inside
  3789     // and forwarded.
  3791 #ifdef G1_DEBUG
  3792     gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
  3793                            (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
  3794                            !_g1->obj_in_cs(p) || p->is_forwarded());
  3795 #endif // G1_DEBUG
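           // Note that an object which failed to evacuate has been forwarded
           // to itself (see handle_evacuation_failure() below), so it still
           // reports is_forwarded() here and is treated as live, which is
           // what we want.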
  3797     return !_g1->obj_in_cs(p) || p->is_forwarded();
  3799 };
  3801 class G1KeepAliveClosure: public OopClosure {
  3802   G1CollectedHeap* _g1;
  3803 public:
  3804   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3805   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  3806   void do_oop(      oop* p) {
  3807     oop obj = *p;
  3808 #ifdef G1_DEBUG
  3809     if (PrintGC && Verbose) {
  3810       gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
  3811                              p, (void*) obj, (void*) *p);
  3813 #endif // G1_DEBUG
  3815     if (_g1->obj_in_cs(obj)) {
  3816       assert( obj->is_forwarded(), "invariant" );
  3817       *p = obj->forwardee();
  3818 #ifdef G1_DEBUG
  3819       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
  3820                              (void*) obj, (void*) *p);
  3821 #endif // G1_DEBUG
  3824 };
  3826 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  3827 private:
  3828   G1CollectedHeap* _g1;
  3829   DirtyCardQueue *_dcq;
  3830   CardTableModRefBS* _ct_bs;
  3832 public:
  3833   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
  3834     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
  3836   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3837   virtual void do_oop(      oop* p) { do_oop_work(p); }
  3838   template <class T> void do_oop_work(T* p) {
  3839     assert(_from->is_in_reserved(p), "paranoia");
  3840     if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
  3841         !_from->is_survivor()) {
  3842       size_t card_index = _ct_bs->index_for(p);
  3843       if (_ct_bs->mark_card_deferred(card_index)) {
  3844         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
  3848 };
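       // Rather than touching the remembered sets directly, the closure above
       // marks the card spanning p as deferred and enqueues it on the dirty
       // card queue passed to the constructor; the enqueued cards are
       // processed after the evacuation itself, instead of updating remembered
       // sets while objects are still moving.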
  3850 class RemoveSelfPointerClosure: public ObjectClosure {
  3851 private:
  3852   G1CollectedHeap* _g1;
  3853   ConcurrentMark* _cm;
  3854   HeapRegion* _hr;
  3855   size_t _prev_marked_bytes;
  3856   size_t _next_marked_bytes;
  3857   OopsInHeapRegionClosure *_cl;
  3858 public:
  3859   RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
  3860     _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
  3861     _next_marked_bytes(0), _cl(cl) {}
  3863   size_t prev_marked_bytes() { return _prev_marked_bytes; }
  3864   size_t next_marked_bytes() { return _next_marked_bytes; }
  3866   // The original idea here was to coalesce evacuated and dead objects.
  3867   // However that caused complications with the block offset table (BOT).
  3868   // In particular, consider two TLABs, one of them partially refined:
  3869   // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  3870   // The BOT entries of the unrefined part of TLAB_2 point to the start
  3871   // of TLAB_2. If the last object of TLAB_1 and the first object
  3872   // of TLAB_2 are coalesced, then the cards of the unrefined part
  3873   // would point into the middle of the filler object.
  3874   //
  3875   // The current approach is to not coalesce and leave the BOT contents intact.
  3876   void do_object(oop obj) {
  3877     if (obj->is_forwarded() && obj->forwardee() == obj) {
  3878       // The object failed to move.
  3879       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
  3880       _cm->markPrev(obj);
  3881       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3882       _prev_marked_bytes += (obj->size() * HeapWordSize);
  3883       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
  3884         _cm->markAndGrayObjectIfNecessary(obj);
  3886       obj->set_mark(markOopDesc::prototype());
  3887       // While we were processing RSet buffers during the
  3888       // collection, we actually didn't scan any cards on the
  3889       // collection set, since we didn't want to update remembered
  3890       // sets with entries that point into the collection set, given
  3891       // that live objects from the collection set are about to move
  3892       // and such entries will be stale very soon. This change also
  3893       // dealt with a reliability issue which involved scanning a
  3894       // card in the collection set and coming across an array that
  3895       // was being chunked and looking malformed. The problem is
  3896       // that, if evacuation fails, we might have remembered set
  3897       // entries missing given that we skipped cards on the
  3898       // collection set. So, we'll recreate such entries now.
  3899       obj->oop_iterate(_cl);
  3900       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3901     } else {
  3902       // The object has either been evacuated or is dead. Fill it with a
  3903       // dummy object.
  3904       MemRegion mr((HeapWord*)obj, obj->size());
  3905       CollectedHeap::fill_with_object(mr);
  3906       _cm->clearRangeBothMaps(mr);
  3909 };
  3911 void G1CollectedHeap::remove_self_forwarding_pointers() {
  3912   UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
  3913   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  3914   UpdateRSetDeferred deferred_update(_g1h, &dcq);
  3915   OopsInHeapRegionClosure *cl;
  3916   if (G1DeferredRSUpdate) {
  3917     cl = &deferred_update;
  3918   } else {
  3919     cl = &immediate_update;
  3921   HeapRegion* cur = g1_policy()->collection_set();
  3922   while (cur != NULL) {
  3923     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3925     RemoveSelfPointerClosure rspc(_g1h, cl);
  3926     if (cur->evacuation_failed()) {
  3927       assert(cur->in_collection_set(), "bad CS");
  3928       cl->set_region(cur);
  3929       cur->object_iterate(&rspc);
  3931       // A number of manipulations to make the TAMS be the current top,
  3932       // and the marked bytes be the ones observed in the iteration.
  3933       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
  3934         // The comments below are the postconditions achieved by the
  3935         // calls.  Note especially the last such condition, which says that
  3936         // the count of marked bytes has been properly restored.
  3937         cur->note_start_of_marking(false);
  3938         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3939         cur->add_to_marked_bytes(rspc.prev_marked_bytes());
  3940         // _next_marked_bytes == prev_marked_bytes.
  3941         cur->note_end_of_marking();
  3942         // _prev_top_at_mark_start == top(),
  3943         // _prev_marked_bytes == prev_marked_bytes
  3945       // If there is no mark in progress, we modified the _next variables
  3946       // above needlessly, but harmlessly.
  3947       if (_g1h->mark_in_progress()) {
  3948         cur->note_start_of_marking(false);
  3949         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3950         // _next_marked_bytes == next_marked_bytes.
  3953       // Now make sure the region has the right index in the sorted array.
  3954       g1_policy()->note_change_in_marked_bytes(cur);
  3956     cur = cur->next_in_collection_set();
  3958   assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3960   // Now restore saved marks, if any.
  3961   if (_objs_with_preserved_marks != NULL) {
  3962     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  3963     guarantee(_objs_with_preserved_marks->length() ==
  3964               _preserved_marks_of_objs->length(), "Both or none.");
  3965     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  3966       oop obj   = _objs_with_preserved_marks->at(i);
  3967       markOop m = _preserved_marks_of_objs->at(i);
  3968       obj->set_mark(m);
  3970     // Delete the preserved marks growable arrays (allocated on the C heap).
  3971     delete _objs_with_preserved_marks;
  3972     delete _preserved_marks_of_objs;
  3973     _objs_with_preserved_marks = NULL;
  3974     _preserved_marks_of_objs = NULL;
  3978 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  3979   _evac_failure_scan_stack->push(obj);
  3982 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  3983   assert(_evac_failure_scan_stack != NULL, "precondition");
  3985   while (_evac_failure_scan_stack->length() > 0) {
  3986      oop obj = _evac_failure_scan_stack->pop();
  3987      _evac_failure_closure->set_region(heap_region_containing(obj));
  3988      obj->oop_iterate_backwards(_evac_failure_closure);
  3992 void G1CollectedHeap::handle_evacuation_failure(oop old) {
  3993   markOop m = old->mark();
  3994   // forward to self
  3995   assert(!old->is_forwarded(), "precondition");
  3997   old->forward_to(old);
  3998   handle_evacuation_failure_common(old, m);
  4001 oop
  4002 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  4003                                                oop old) {
  4004   markOop m = old->mark();
  4005   oop forward_ptr = old->forward_to_atomic(old);
  4006   if (forward_ptr == NULL) {
  4007     // Forward-to-self succeeded.
  4008     if (_evac_failure_closure != cl) {
  4009       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  4010       assert(!_drain_in_progress,
  4011              "Should only be true while someone holds the lock.");
  4012       // Set the global evac-failure closure to the current thread's.
  4013       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  4014       set_evac_failure_closure(cl);
  4015       // Now do the common part.
  4016       handle_evacuation_failure_common(old, m);
  4017       // Reset to NULL.
  4018       set_evac_failure_closure(NULL);
  4019     } else {
  4020       // The lock is already held, and this is recursive.
  4021       assert(_drain_in_progress, "This should only be the recursive case.");
  4022       handle_evacuation_failure_common(old, m);
  4024     return old;
  4025   } else {
  4026     // Someone else had a place to copy it.
  4027     return forward_ptr;
  4031 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  4032   set_evacuation_failed(true);
  4034   preserve_mark_if_necessary(old, m);
  4036   HeapRegion* r = heap_region_containing(old);
  4037   if (!r->evacuation_failed()) {
  4038     r->set_evacuation_failed(true);
  4039     if (G1PrintHeapRegions) {
  4040       gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
  4041                           "["PTR_FORMAT","PTR_FORMAT")\n",
  4042                           r, r->bottom(), r->end());
  4046   push_on_evac_failure_scan_stack(old);
  4048   if (!_drain_in_progress) {
  4049     // prevent recursion in copy_to_survivor_space()
  4050     _drain_in_progress = true;
  4051     drain_evac_failure_scan_stack();
  4052     _drain_in_progress = false;
  4056 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  4057   assert(evacuation_failed(), "Oversaving!");
  4058   // We want to call the "for_promotion_failure" version only in the
  4059   // case of a promotion failure.
  4060   if (m->must_be_preserved_for_promotion_failure(obj)) {
  4061     if (_objs_with_preserved_marks == NULL) {
  4062       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  4063       _objs_with_preserved_marks =
  4064         new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  4065       _preserved_marks_of_objs =
  4066         new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
  4068     _objs_with_preserved_marks->push(obj);
  4069     _preserved_marks_of_objs->push(m);
  4073 // *** Parallel G1 Evacuation
  4075 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  4076                                                   size_t word_size) {
  4077   assert(!isHumongous(word_size),
  4078          err_msg("we should not be seeing humongous allocation requests "
  4079                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4081   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  4082   // let the caller handle alloc failure
  4083   if (alloc_region == NULL) return NULL;
  4085   HeapWord* block = alloc_region->par_allocate(word_size);
  4086   if (block == NULL) {
  4087     MutexLockerEx x(par_alloc_during_gc_lock(),
  4088                     Mutex::_no_safepoint_check_flag);
  4089     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  4091   return block;
  4094 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  4095                                             bool par) {
  4096   // Another thread might have obtained alloc_region for the given
  4097   // purpose, and might be attempting to allocate in it, and might
  4098   // succeed.  Therefore, we can't do the "finalization" stuff on the
  4099   // region below until we're sure the last allocation has happened.
  4100   // We ensure this by allocating the remaining space with a garbage
  4101   // object.
  4102   if (par) par_allocate_remaining_space(alloc_region);
  4103   // Now we can do the post-GC stuff on the region.
  4104   alloc_region->note_end_of_copying();
  4105   g1_policy()->record_after_bytes(alloc_region->used());
  4108 HeapWord*
  4109 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  4110                                          HeapRegion*    alloc_region,
  4111                                          bool           par,
  4112                                          size_t         word_size) {
  4113   assert(!isHumongous(word_size),
  4114          err_msg("we should not be seeing humongous allocation requests "
  4115                  "during GC, word_size = "SIZE_FORMAT, word_size));
  4117   HeapWord* block = NULL;
  4118   // In the parallel case, a previous thread to obtain the lock may have
  4119   // already assigned a new gc_alloc_region.
  4120   if (alloc_region != _gc_alloc_regions[purpose]) {
  4121     assert(par, "But should only happen in parallel case.");
  4122     alloc_region = _gc_alloc_regions[purpose];
  4123     if (alloc_region == NULL) return NULL;
  4124     block = alloc_region->par_allocate(word_size);
  4125     if (block != NULL) return block;
  4126     // Otherwise, continue; this new region is empty, too.
  4128   assert(alloc_region != NULL, "We better have an allocation region");
  4129   retire_alloc_region(alloc_region, par);
  4131   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  4132     // Cannot allocate more regions for the given purpose.
  4133     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
  4134     // Is there an alternative?
  4135     if (purpose != alt_purpose) {
  4136       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
  4137       // Has the alternative region not been aliased?
  4138       if (alloc_region != alt_region && alt_region != NULL) {
  4139         // Try to allocate in the alternative region.
  4140         if (par) {
  4141           block = alt_region->par_allocate(word_size);
  4142         } else {
  4143           block = alt_region->allocate(word_size);
  4145         // Make an alias.
  4146         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  4147         if (block != NULL) {
  4148           return block;
  4150         retire_alloc_region(alt_region, par);
  4152       // Both the allocation region and the alternative one are full
  4153       // and aliased; replace them with a new allocation region.
  4154       purpose = alt_purpose;
  4155     } else {
  4156       set_gc_alloc_region(purpose, NULL);
  4157       return NULL;
  4161   // Now allocate a new region for allocation.
  4162   alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
  4164   // let the caller handle alloc failure
  4165   if (alloc_region != NULL) {
  4167     assert(check_gc_alloc_regions(), "alloc regions messed up");
  4168     assert(alloc_region->saved_mark_at_top(),
  4169            "Mark should have been saved already.");
  4170     // We used to assert that the region was zero-filled here, but no
  4171     // longer.
  4173     // This must be done last: once it's installed, other regions may
  4174     // allocate in it (without holding the lock.)
  4175     set_gc_alloc_region(purpose, alloc_region);
  4177     if (par) {
  4178       block = alloc_region->par_allocate(word_size);
  4179     } else {
  4180       block = alloc_region->allocate(word_size);
  4182     // Caller handles alloc failure.
  4183   } else {
  4184     // This also sets to NULL any other aliases that were using the same old alloc region.
  4185     set_gc_alloc_region(purpose, NULL);
  4187   return block;  // May be NULL.
  4190 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
  4191   HeapWord* block = NULL;
  4192   size_t free_words;
  4193   do {
  4194     free_words = r->free()/HeapWordSize;
  4195     // If there's too little space, no one can allocate, so we're done.
  4196     if (free_words < CollectedHeap::min_fill_size()) return;
  4197     // Otherwise, try to claim it.
  4198     block = r->par_allocate(free_words);
  4199   } while (block == NULL);
  4200   fill_with_object(block, free_words);
  4203 #ifndef PRODUCT
  4204 bool GCLabBitMapClosure::do_bit(size_t offset) {
  4205   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
  4206   guarantee(_cm->isMarked(oop(addr)), "it should be!");
  4207   return true;
  4209 #endif // PRODUCT
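       // Per-worker state for an evacuation pause: the worker's task queue and
       // dirty card queue, survivor/tenured PLABs, age table, timing counters,
       // and the surviving-young-words array allocated below.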
  4211 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
  4212   : _g1h(g1h),
  4213     _refs(g1h->task_queue(queue_num)),
  4214     _dcq(&g1h->dirty_card_queue_set()),
  4215     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  4216     _g1_rem(g1h->g1_rem_set()),
  4217     _hash_seed(17), _queue_num(queue_num),
  4218     _term_attempts(0),
  4219     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
  4220     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
  4221     _age_table(false),
  4222     _strong_roots_time(0), _term_time(0),
  4223     _alloc_buffer_waste(0), _undo_waste(0)
  4225   // we allocate one entry more than the young cset length, since
  4226   // we "sacrifice" entry 0 to keep track of surviving bytes for
  4227   // non-young regions (where the age is -1)
  4228   // We also add a few elements at the beginning and at the end in
  4229   // an attempt to eliminate cache contention
  4230   size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
  4231   size_t array_length = PADDING_ELEM_NUM +
  4232                         real_length +
  4233                         PADDING_ELEM_NUM;
  4234   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  4235   if (_surviving_young_words_base == NULL)
  4236     vm_exit_out_of_memory(array_length * sizeof(size_t),
  4237                           "Not enough space for young surv histo.");
  4238   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  4239   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  4241   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  4242   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
  4244   _start = os::elapsedTime();
  4247 void
  4248 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
  4250   st->print_raw_cr("GC Termination Stats");
  4251   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
  4252                    " ------waste (KiB)------");
  4253   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
  4254                    "  total   alloc    undo");
  4255   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
  4256                    " ------- ------- -------");
  4259 void
  4260 G1ParScanThreadState::print_termination_stats(int i,
  4261                                               outputStream* const st) const
  4263   const double elapsed_ms = elapsed_time() * 1000.0;
  4264   const double s_roots_ms = strong_roots_time() * 1000.0;
  4265   const double term_ms    = term_time() * 1000.0;
  4266   st->print_cr("%3d %9.2f %9.2f %6.2f "
  4267                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
  4268                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
  4269                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
  4270                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
  4271                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
  4272                alloc_buffer_waste() * HeapWordSize / K,
  4273                undo_waste() * HeapWordSize / K);
  4276 #ifdef ASSERT
  4277 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  4278   assert(ref != NULL, "invariant");
  4279   assert(UseCompressedOops, "sanity");
  4280   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
  4281   oop p = oopDesc::load_decode_heap_oop(ref);
  4282   assert(_g1h->is_in_g1_reserved(p),
  4283          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4284   return true;
  4287 bool G1ParScanThreadState::verify_ref(oop* ref) const {
  4288   assert(ref != NULL, "invariant");
  4289   if (has_partial_array_mask(ref)) {
  4290     // Must be in the collection set--it's already been copied.
  4291     oop p = clear_partial_array_mask(ref);
  4292     assert(_g1h->obj_in_cs(p),
  4293            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4294   } else {
  4295     oop p = oopDesc::load_decode_heap_oop(ref);
  4296     assert(_g1h->is_in_g1_reserved(p),
  4297            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
  4299   return true;
  4302 bool G1ParScanThreadState::verify_task(StarTask ref) const {
  4303   if (ref.is_narrow()) {
  4304     return verify_ref((narrowOop*) ref);
  4305   } else {
  4306     return verify_ref((oop*) ref);
  4309 #endif // ASSERT
  4311 void G1ParScanThreadState::trim_queue() {
  4312   StarTask ref;
  4313   do {
  4314     // Drain the overflow stack first, so other threads can steal.
  4315     while (refs()->pop_overflow(ref)) {
  4316       deal_with_reference(ref);
  4318     while (refs()->pop_local(ref)) {
  4319       deal_with_reference(ref);
  4321   } while (!refs()->is_empty());
  4324 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  4325   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  4326   _par_scan_state(par_scan_state) { }
  4328 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
  4329   // This is called _after_ do_oop_work has been called, hence after
  4330   // the object has been relocated to its new location and *p points
  4331   // to its new location.
  4333   T heap_oop = oopDesc::load_heap_oop(p);
  4334   if (!oopDesc::is_null(heap_oop)) {
  4335     oop obj = oopDesc::decode_heap_oop(heap_oop);
  4336     assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
  4337            "shouldn't still be in the CSet if evacuation didn't fail.");
  4338     HeapWord* addr = (HeapWord*)obj;
  4339     if (_g1->is_in_g1_reserved(addr))
  4340       _cm->grayRoot(oop(addr));
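       // Copy a collection-set object into a GC alloc region. The copy is
       // claimed via forward_to_atomic(); a thread that loses the race undoes
       // its allocation and returns the winner's forwardee instead.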
  4344 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
  4345   size_t    word_sz = old->size();
  4346   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  4347   // +1 to make the -1 indexes valid...
  4348   int       young_index = from_region->young_index_in_cset()+1;
  4349   assert( (from_region->is_young() && young_index > 0) ||
  4350           (!from_region->is_young() && young_index == 0), "invariant" );
  4351   G1CollectorPolicy* g1p = _g1->g1_policy();
  4352   markOop m = old->mark();
  4353   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4354                                            : m->age();
  4355   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4356                                                              word_sz);
  4357   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  4358   oop       obj     = oop(obj_ptr);
  4360   if (obj_ptr == NULL) {
  4361     // This will either forward-to-self, or detect that someone else has
  4362     // installed a forwarding pointer.
  4363     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4364     return _g1->handle_evacuation_failure_par(cl, old);
  4367   // We're going to allocate linearly, so might as well prefetch ahead.
  4368   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  4370   oop forward_ptr = old->forward_to_atomic(obj);
  4371   if (forward_ptr == NULL) {
  4372     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4373     if (g1p->track_object_age(alloc_purpose)) {
  4374       // We could simply do obj->incr_age(). However, this causes a
  4375       // performance issue. obj->incr_age() will first check whether
  4376       // the object has a displaced mark by checking its mark word;
  4377       // getting the mark word from the new location of the object
  4378       // stalls. So, given that we already have the mark word and we
  4379       // are about to install it anyway, it's better to increase the
  4380       // age on the mark word, when the object does not have a
  4381       // displaced mark word. We're not expecting many objects to have
  4382   // a displaced mark word, so that case is not optimized
  4383       // further (it could be...) and we simply call obj->incr_age().
  4385       if (m->has_displaced_mark_helper()) {
  4386         // in this case, we have to install the mark word first,
  4387         // otherwise obj looks to be forwarded (the old mark word,
  4388         // which contains the forward pointer, was copied)
  4389         obj->set_mark(m);
  4390         obj->incr_age();
  4391       } else {
  4392         m = m->incr_age();
  4393         obj->set_mark(m);
  4395       _par_scan_state->age_table()->add(obj, word_sz);
  4396     } else {
  4397       obj->set_mark(m);
  4400     // preserve "next" mark bit
  4401     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  4402       if (!use_local_bitmaps ||
  4403           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
  4404         // if we couldn't mark it on the local bitmap (this happens when
  4405         // the object was not allocated in the GCLab), we have to bite
  4406         // the bullet and do the standard parallel mark
  4407         _cm->markAndGrayObjectIfNecessary(obj);
  4409 #if 1
  4410       if (_g1->isMarkedNext(old)) {
  4411         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
  4413 #endif
  4416     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  4417     surv_young_words[young_index] += word_sz;
  4419     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4420       arrayOop(old)->set_length(0);
  4421       oop* old_p = set_partial_array_mask(old);
  4422       _par_scan_state->push_on_queue(old_p);
  4423     } else {
  4424       // No point in using the slower heap_region_containing() method,
  4425       // given that we know obj is in the heap.
  4426       _scanner->set_region(_g1->heap_region_containing_raw(obj));
  4427       obj->oop_iterate_backwards(_scanner);
  4429   } else {
  4430     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4431     obj = forward_ptr;
  4433   return obj;
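       // The main copying closure applied to roots and fields: references into
       // the collection set are replaced with the forwardee (copying the object
       // first if nobody else has), and the remembered-set / evacuation /
       // generational barriers selected by the template parameters are applied.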
  4436 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
  4437 template <class T>
  4438 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee>
  4439 ::do_oop_work(T* p) {
  4440   oop obj = oopDesc::load_decode_heap_oop(p);
  4441   assert(barrier != G1BarrierRS || obj != NULL,
  4442          "Precondition: G1BarrierRS implies obj is nonNull");
  4444   // here the null check is implicit in the in_cset_fast_test() call
  4445   if (_g1->in_cset_fast_test(obj)) {
  4446 #if G1_REM_SET_LOGGING
  4447     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
  4448                            "into CS.", p, (void*) obj);
  4449 #endif
  4450     if (obj->is_forwarded()) {
  4451       oopDesc::encode_store_heap_oop(p, obj->forwardee());
  4452     } else {
  4453       oop copy_oop = copy_to_survivor_space(obj);
  4454       oopDesc::encode_store_heap_oop(p, copy_oop);
  4456     // When scanning the RS, we only care about objs in CS.
  4457     if (barrier == G1BarrierRS) {
  4458       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4462   if (barrier == G1BarrierEvac && obj != NULL) {
  4463     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4466   if (do_gen_barrier && obj != NULL) {
  4467     par_do_barrier(p);
  4471 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
  4472 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
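       // Process one chunk of a large object array. The from-space copy's
       // length field serves as a cursor recording how many elements have
       // already been handed out; any remainder is re-pushed as another
       // partial-array task.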
  4474 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  4475   assert(has_partial_array_mask(p), "invariant");
  4476   oop old = clear_partial_array_mask(p);
  4477   assert(old->is_objArray(), "must be obj array");
  4478   assert(old->is_forwarded(), "must be forwarded");
  4479   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  4481   objArrayOop obj = objArrayOop(old->forwardee());
  4482   assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  4483   // Process ParGCArrayScanChunk elements now
  4484   // and push the remainder back onto queue
  4485   int start     = arrayOop(old)->length();
  4486   int end       = obj->length();
  4487   int remainder = end - start;
  4488   assert(start <= end, "just checking");
  4489   if (remainder > 2 * ParGCArrayScanChunk) {
  4490     // Test above combines last partial chunk with a full chunk
  4491     end = start + ParGCArrayScanChunk;
  4492     arrayOop(old)->set_length(end);
  4493     // Push remainder.
  4494     oop* old_p = set_partial_array_mask(old);
  4495     assert(arrayOop(old)->length() < obj->length(), "Empty push?");
  4496     _par_scan_state->push_on_queue(old_p);
  4497   } else {
  4498     // Restore length so that the heap remains parsable in
  4499     // case of evacuation failure.
  4500     arrayOop(old)->set_length(end);
  4502   _scanner.set_region(_g1->heap_region_containing_raw(obj));
  4503   // process our set of indices (include header in first chunk)
  4504   obj->oop_iterate_range(&_scanner, start, end);
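       // Drains the worker's scan queues and steals from other workers until
       // the termination protocol agrees that all queues are empty.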
  4507 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4508 protected:
  4509   G1CollectedHeap*              _g1h;
  4510   G1ParScanThreadState*         _par_scan_state;
  4511   RefToScanQueueSet*            _queues;
  4512   ParallelTaskTerminator*       _terminator;
  4514   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4515   RefToScanQueueSet*      queues()         { return _queues; }
  4516   ParallelTaskTerminator* terminator()     { return _terminator; }
  4518 public:
  4519   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4520                                 G1ParScanThreadState* par_scan_state,
  4521                                 RefToScanQueueSet* queues,
  4522                                 ParallelTaskTerminator* terminator)
  4523     : _g1h(g1h), _par_scan_state(par_scan_state),
  4524       _queues(queues), _terminator(terminator) {}
  4526   void do_void();
  4528 private:
  4529   inline bool offer_termination();
  4530 };
  4532 bool G1ParEvacuateFollowersClosure::offer_termination() {
  4533   G1ParScanThreadState* const pss = par_scan_state();
  4534   pss->start_term_time();
  4535   const bool res = terminator()->offer_termination();
  4536   pss->end_term_time();
  4537   return res;
  4540 void G1ParEvacuateFollowersClosure::do_void() {
  4541   StarTask stolen_task;
  4542   G1ParScanThreadState* const pss = par_scan_state();
  4543   pss->trim_queue();
  4545   do {
  4546     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  4547       assert(pss->verify_task(stolen_task), "sanity");
  4548       if (stolen_task.is_narrow()) {
  4549         pss->deal_with_reference((narrowOop*) stolen_task);
  4550       } else {
  4551         pss->deal_with_reference((oop*) stolen_task);
  4554       // We've just processed a reference and we might have made
  4555       // available new entries on the queues. So we have to make sure
  4556       // we drain the queues as necessary.
  4557       pss->trim_queue();
  4559   } while (!offer_termination());
  4561   pss->retire_alloc_buffers();
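       // The gang task each GC worker runs during an evacuation pause: set up
       // the per-thread closures, scan the strong roots, then evacuate
       // followers by draining and stealing scan-queue entries.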
  4564 class G1ParTask : public AbstractGangTask {
  4565 protected:
  4566   G1CollectedHeap*       _g1h;
  4567   RefToScanQueueSet      *_queues;
  4568   ParallelTaskTerminator _terminator;
  4569   int _n_workers;
  4571   Mutex _stats_lock;
  4572   Mutex* stats_lock() { return &_stats_lock; }
  4574   size_t getNCards() {
  4575     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4576       / G1BlockOffsetSharedArray::N_bytes;
  4579 public:
  4580   G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
  4581     : AbstractGangTask("G1 collection"),
  4582       _g1h(g1h),
  4583       _queues(task_queues),
  4584       _terminator(workers, _queues),
  4585       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
  4586       _n_workers(workers)
  4587   {}
  4589   RefToScanQueueSet* queues() { return _queues; }
  4591   RefToScanQueue *work_queue(int i) {
  4592     return queues()->queue(i);
  4595   void work(int i) {
  4596     if (i >= _n_workers) return;  // no work needed this round
  4598     double start_time_ms = os::elapsedTime() * 1000.0;
  4599     _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
  4601     ResourceMark rm;
  4602     HandleMark   hm;
  4604     G1ParScanThreadState            pss(_g1h, i);
  4605     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  4606     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  4607     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  4609     pss.set_evac_closure(&scan_evac_cl);
  4610     pss.set_evac_failure_closure(&evac_failure_cl);
  4611     pss.set_partial_scan_closure(&partial_scan_cl);
  4613     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  4614     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  4615     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  4616     G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
  4618     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  4619     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  4620     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  4622     OopsInHeapRegionClosure        *scan_root_cl;
  4623     OopsInHeapRegionClosure        *scan_perm_cl;
  4625     if (_g1h->g1_policy()->during_initial_mark_pause()) {
  4626       scan_root_cl = &scan_mark_root_cl;
  4627       scan_perm_cl = &scan_mark_perm_cl;
  4628     } else {
  4629       scan_root_cl = &only_scan_root_cl;
  4630       scan_perm_cl = &only_scan_perm_cl;
  4633     pss.start_strong_roots();
  4634     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  4635                                   SharedHeap::SO_AllClasses,
  4636                                   scan_root_cl,
  4637                                   &push_heap_rs_cl,
  4638                                   scan_perm_cl,
  4639                                   i);
  4640     pss.end_strong_roots();
  4642       double start = os::elapsedTime();
  4643       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4644       evac.do_void();
  4645       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4646       double term_ms = pss.term_time()*1000.0;
  4647       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  4648       _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
  4650     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4651     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  4653     // Clean up any par-expanded rem sets.
  4654     HeapRegionRemSet::par_cleanup();
  4656     if (ParallelGCVerbose) {
  4657       MutexLocker x(stats_lock());
  4658       pss.print_termination_stats(i);
  4661     assert(pss.refs()->is_empty(), "should be empty");
  4662     double end_time_ms = os::elapsedTime() * 1000.0;
  4663     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
  4665 };
  4667 // *** Common G1 Evacuation Stuff
  4669 // This method is run in a GC worker.
  4671 void
  4672 G1CollectedHeap::
  4673 g1_process_strong_roots(bool collecting_perm_gen,
  4674                         SharedHeap::ScanningOption so,
  4675                         OopClosure* scan_non_heap_roots,
  4676                         OopsInHeapRegionClosure* scan_rs,
  4677                         OopsInGenClosure* scan_perm,
  4678                         int worker_i) {
  4679   // First scan the strong roots, including the perm gen.
  4680   double ext_roots_start = os::elapsedTime();
  4681   double closure_app_time_sec = 0.0;
  4683   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  4684   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  4685   buf_scan_perm.set_generation(perm_gen());
  4687   // Walk the code cache w/o buffering, because StarTask cannot handle
  4688   // unaligned oop locations.
  4689   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
  4691   process_strong_roots(false, // no scoping; this is parallel code
  4692                        collecting_perm_gen, so,
  4693                        &buf_scan_non_heap_roots,
  4694                        &eager_scan_code_roots,
  4695                        &buf_scan_perm);
  4697   // Finish up any enqueued closure apps.
  4698   buf_scan_non_heap_roots.done();
  4699   buf_scan_perm.done();
  4700   double ext_roots_end = os::elapsedTime();
  4701   g1_policy()->reset_obj_copy_time(worker_i);
  4702   double obj_copy_time_sec =
  4703     buf_scan_non_heap_roots.closure_app_seconds() +
  4704     buf_scan_perm.closure_app_seconds();
  4705   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  4706   double ext_root_time_ms =
  4707     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  4708   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  4710   // Scan strong roots in mark stack.
  4711   if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
  4712     concurrent_mark()->oops_do(scan_non_heap_roots);
  4714   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  4715   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
  4717   // XXX What should this be doing in the parallel case?
  4718   g1_policy()->record_collection_pause_end_CH_strong_roots();
  4719   // Now scan the complement of the collection set.
  4720   if (scan_rs != NULL) {
  4721     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  4723   // Finish with the ref_processor roots.
  4724   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4725     // We need to treat the discovered reference lists as roots and
  4726     // keep entries (which are added by the marking threads) on them
  4727     // live until they can be processed at the end of marking.
  4728     ref_processor()->weak_oops_do(scan_non_heap_roots);
  4729     ref_processor()->oops_do(scan_non_heap_roots);
  4731   g1_policy()->record_collection_pause_end_G1_strong_roots();
  4732   _process_strong_tasks->all_tasks_completed();
  4735 void
  4736 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  4737                                        OopClosure* non_root_closure) {
  4738   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
  4739   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
  4743 class SaveMarksClosure: public HeapRegionClosure {
  4744 public:
  4745   bool doHeapRegion(HeapRegion* r) {
  4746     r->save_marks();
  4747     return false;
  4749 };
  4751 void G1CollectedHeap::save_marks() {
  4752   if (!CollectedHeap::use_parallel_gc_threads()) {
  4753     SaveMarksClosure sm;
  4754     heap_region_iterate(&sm);
  4756   // We do this even in the parallel case
  4757   perm_gen()->save_marks();
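       // Top-level driver for evacuating the collection set: run G1ParTask on
       // the worker gang (or inline on a single thread), process JNI weak
       // roots, release the GC alloc regions, handle any evacuation failure,
       // and restore the refinement hot-card cache.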
  4760 void G1CollectedHeap::evacuate_collection_set() {
  4761   set_evacuation_failed(false);
  4763   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4764   concurrent_g1_refine()->set_use_cache(false);
  4765   concurrent_g1_refine()->clear_hot_cache_claimed_index();
  4767   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  4768   set_par_threads(n_workers);
  4769   G1ParTask g1_par_task(this, n_workers, _task_queues);
  4771   init_for_evac_failure(NULL);
  4773   rem_set()->prepare_for_younger_refs_iterate(true);
  4775   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4776   double start_par = os::elapsedTime();
  4777   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4778     // The individual threads will set their evac-failure closures.
  4779     StrongRootsScope srs(this);
  4780     if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
  4781     workers()->run_task(&g1_par_task);
  4782   } else {
  4783     StrongRootsScope srs(this);
  4784     g1_par_task.work(0);
  4787   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4788   g1_policy()->record_par_time(par_time);
  4789   set_par_threads(0);
  4790   // Is this the right thing to do here?  We don't save marks
  4791   // on individual heap regions when we allocate from
  4792   // them in parallel, so this seems like the correct place for this.
  4793   retire_all_alloc_regions();
  4795   // Weak root processing.
  4796   // Note: when JSR 292 is enabled and code blobs can contain
  4797   // non-perm oops then we will need to process the code blobs
  4798   // here too.
  4800     G1IsAliveClosure is_alive(this);
  4801     G1KeepAliveClosure keep_alive(this);
  4802     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4804   release_gc_alloc_regions(false /* totally */);
  4805   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4807   concurrent_g1_refine()->clear_hot_cache();
  4808   concurrent_g1_refine()->set_use_cache(true);
  4810   finalize_for_evac_failure();
  4812   // Must do this before removing self-forwarding pointers, which clears
  4813   // the per-region evac-failure flags.
  4814   concurrent_mark()->complete_marking_in_collection_set();
  4816   if (evacuation_failed()) {
  4817     remove_self_forwarding_pointers();
  4818     if (PrintGCDetails) {
  4819       gclog_or_tty->print(" (to-space overflow)");
  4820     } else if (PrintGC) {
  4821       gclog_or_tty->print("--");
  4825   if (G1DeferredRSUpdate) {
  4826     RedirtyLoggedCardTableEntryFastClosure redirty;
  4827     dirty_card_queue_set().set_closure(&redirty);
  4828     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4830     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
  4831     dcq.merge_bufferlists(&dirty_card_queue_set());
  4832     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4834   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
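       // Free a single region: account for its garbage bytes with the policy,
       // then do the actual list and counter updates via free_region_work()
       // and finish_free_region_work().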
  4837 void G1CollectedHeap::free_region(HeapRegion* hr) {
  4838   size_t pre_used = 0;
  4839   size_t cleared_h_regions = 0;
  4840   size_t freed_regions = 0;
  4841   UncleanRegionList local_list;
  4843   HeapWord* start = hr->bottom();
  4844   HeapWord* end   = hr->prev_top_at_mark_start();
  4845   size_t used_bytes = hr->used();
  4846   size_t live_bytes = hr->max_live_bytes();
  4847   if (used_bytes > 0) {
  4848     guarantee( live_bytes <= used_bytes, "invariant" );
  4849   } else {
  4850     guarantee( live_bytes == 0, "invariant" );
  4853   size_t garbage_bytes = used_bytes - live_bytes;
  4854   if (garbage_bytes > 0)
  4855     g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
  4857   free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
  4858                    &local_list);
  4859   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  4860                           &local_list);
  4863 void
  4864 G1CollectedHeap::free_region_work(HeapRegion* hr,
  4865                                   size_t& pre_used,
  4866                                   size_t& cleared_h_regions,
  4867                                   size_t& freed_regions,
  4868                                   UncleanRegionList* list,
  4869                                   bool par) {
  4870   pre_used += hr->used();
  4871   if (hr->isHumongous()) {
  4872     assert(hr->startsHumongous(),
  4873            "Only the start of a humongous region should be freed.");
  4874     int ind = _hrs->find(hr);
  4875     assert(ind != -1, "Should have an index.");
  4876     // Clear the start region.
  4877     hr->hr_clear(par, true /*clear_space*/);
  4878     list->insert_before_head(hr);
  4879     cleared_h_regions++;
  4880     freed_regions++;
  4881     // Clear any continued regions.
  4882     ind++;
  4883     while ((size_t)ind < n_regions()) {
  4884       HeapRegion* hrc = _hrs->at(ind);
  4885       if (!hrc->continuesHumongous()) break;
  4886       // Otherwise, it continues the humongous region.
  4887       assert(hrc->humongous_start_region() == hr, "Huh?");
  4888       hrc->hr_clear(par, true /*clear_space*/);
  4889       cleared_h_regions++;
  4890       freed_regions++;
  4891       list->insert_before_head(hrc);
  4892       ind++;
  4894   } else {
  4895     hr->hr_clear(par, true /*clear_space*/);
  4896     list->insert_before_head(hr);
  4897     freed_regions++;
  4898     // If we're using clear2, this should not be enabled.
  4899     // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
  4903 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
  4904                                               size_t cleared_h_regions,
  4905                                               size_t freed_regions,
  4906                                               UncleanRegionList* list) {
  4907   if (list != NULL && list->sz() > 0) {
  4908     prepend_region_list_on_unclean_list(list);
  4910   // Acquire a lock, if we're parallel, to update possibly-shared
  4911   // variables.
  4912   Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
  4914     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  4915     _summary_bytes_used -= pre_used;
  4916     _num_humongous_regions -= (int) cleared_h_regions;
  4917     _free_regions += freed_regions;
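       // Dirty all cards covering the regions on the given young list; used by
       // cleanUpCardTable() below to re-dirty the survivor regions in the
       // non-parallel case.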
  4922 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  4923   while (list != NULL) {
  4924     guarantee( list->is_young(), "invariant" );
  4926     HeapWord* bottom = list->bottom();
  4927     HeapWord* end = list->end();
  4928     MemRegion mr(bottom, end);
  4929     ct_bs->dirty(mr);
  4931     list = list->get_next_young_region();
  4936 class G1ParCleanupCTTask : public AbstractGangTask {
  4937   CardTableModRefBS* _ct_bs;
  4938   G1CollectedHeap* _g1h;
  4939   HeapRegion* volatile _su_head;
  4940 public:
  4941   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  4942                      G1CollectedHeap* g1h,
  4943                      HeapRegion* survivor_list) :
  4944     AbstractGangTask("G1 Par Cleanup CT Task"),
  4945     _ct_bs(ct_bs),
  4946     _g1h(g1h),
  4947     _su_head(survivor_list)
  4948   { }
  4950   void work(int i) {
  4951     HeapRegion* r;
  4952     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  4953       clear_cards(r);
  4955     // Redirty the cards of the survivor regions.
  4956     dirty_list(&this->_su_head);
  4959   void clear_cards(HeapRegion* r) {
  4960     // Cards for Survivor regions will be dirtied later.
  4961     if (!r->is_survivor()) {
  4962       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  4966   void dirty_list(HeapRegion* volatile * head_ptr) {
  4967     HeapRegion* head;
  4968     do {
  4969       // Pop region off the list.
  4970       head = *head_ptr;
  4971       if (head != NULL) {
  4972         HeapRegion* r = (HeapRegion*)
  4973           Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
  4974         if (r == head) {
  4975           assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
  4976           _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
  4979     } while (*head_ptr != NULL);
  4981 };
  4984 #ifndef PRODUCT
  4985 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  4986   CardTableModRefBS* _ct_bs;
  4987 public:
  4988   G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
  4989     : _ct_bs(ct_bs)
  4990   { }
  4991   virtual bool doHeapRegion(HeapRegion* r)
  4993     MemRegion mr(r->bottom(), r->end());
  4994     if (r->is_survivor()) {
  4995       _ct_bs->verify_dirty_region(mr);
  4996     } else {
  4997       _ct_bs->verify_clean_region(mr);
  4999     return false;
  5001 };
  5002 #endif
  5004 void G1CollectedHeap::cleanUpCardTable() {
  5005   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  5006   double start = os::elapsedTime();
  5008   // Iterate over the dirty cards region list.
  5009   G1ParCleanupCTTask cleanup_task(ct_bs, this,
  5010                                   _young_list->first_survivor_region());
  5012   if (ParallelGCThreads > 0) {
  5013     set_par_threads(workers()->total_workers());
  5014     workers()->run_task(&cleanup_task);
  5015     set_par_threads(0);
  5016   } else {
  5017     while (_dirty_cards_region_list) {
  5018       HeapRegion* r = _dirty_cards_region_list;
  5019       cleanup_task.clear_cards(r);
  5020       _dirty_cards_region_list = r->get_next_dirty_cards_region();
  5021       if (_dirty_cards_region_list == r) {
  5022         // The last region.
  5023         _dirty_cards_region_list = NULL;
  5025       r->set_next_dirty_cards_region(NULL);
  5027     // now, redirty the cards of the survivor regions
  5028     // (it seemed faster to do it this way, instead of iterating over
  5029     // all regions and then clearing / dirtying as appropriate)
  5030     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  5033   double elapsed = os::elapsedTime() - start;
  5034   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  5035 #ifndef PRODUCT
  5036   if (G1VerifyCTCleanup || VerifyAfterGC) {
  5037     G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
  5038     heap_region_iterate(&cleanup_verifier);
  5040 #endif
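       // Walk the collection set after an evacuation pause: record surviving
       // words and remembered-set sizes, unlink every region from the CSet,
       // free the regions that were fully evacuated, and reset those whose
       // evacuation failed.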
  5043 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  5044   double young_time_ms     = 0.0;
  5045   double non_young_time_ms = 0.0;
  5047   // Since the collection set is a superset of the young list,
  5048   // all we need to do to clear the young list is clear its
  5049   // head and length, and unlink any young regions in the code below
  5050   _young_list->clear();
  5052   G1CollectorPolicy* policy = g1_policy();
  5054   double start_sec = os::elapsedTime();
  5055   bool non_young = true;
  5057   HeapRegion* cur = cs_head;
  5058   int age_bound = -1;
  5059   size_t rs_lengths = 0;
  5061   while (cur != NULL) {
  5062     if (non_young) {
  5063       if (cur->is_young()) {
  5064         double end_sec = os::elapsedTime();
  5065         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5066         non_young_time_ms += elapsed_ms;
  5068         start_sec = os::elapsedTime();
  5069         non_young = false;
  5071     } else {
  5072       if (!cur->is_on_free_list()) {
  5073         double end_sec = os::elapsedTime();
  5074         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5075         young_time_ms += elapsed_ms;
  5077         start_sec = os::elapsedTime();
  5078         non_young = true;
  5082     rs_lengths += cur->rem_set()->occupied();
  5084     HeapRegion* next = cur->next_in_collection_set();
  5085     assert(cur->in_collection_set(), "bad CS");
  5086     cur->set_next_in_collection_set(NULL);
  5087     cur->set_in_collection_set(false);
  5089     if (cur->is_young()) {
  5090       int index = cur->young_index_in_cset();
  5091       guarantee( index != -1, "invariant" );
  5092       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
  5093       size_t words_survived = _surviving_young_words[index];
  5094       cur->record_surv_words_in_group(words_survived);
  5096       // At this point we have 'popped' cur from the collection set
  5097       // (linked via next_in_collection_set()) but it is still in the
  5098       // young list (linked via next_young_region()). Clear the
  5099       // _next_young_region field.
  5100       cur->set_next_young_region(NULL);
  5101     } else {
  5102       int index = cur->young_index_in_cset();
  5103       guarantee( index == -1, "invariant" );
  5106     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  5107             (!cur->is_young() && cur->young_index_in_cset() == -1),
  5108             "invariant" );
  5110     if (!cur->evacuation_failed()) {
  5111       // Evacuation succeeded: all live objects have been copied out of cur.
  5112       assert(!cur->is_empty(),
  5113              "Should not have empty regions in a CS.");
  5114       free_region(cur);
  5115     } else {
  5116       cur->uninstall_surv_rate_group();
  5117       if (cur->is_young())
  5118         cur->set_young_index_in_cset(-1);
  5119       cur->set_not_young();
  5120       cur->set_evacuation_failed(false);
  5122     cur = next;
  5125   policy->record_max_rs_lengths(rs_lengths);
  5126   policy->cset_regions_freed();
  5128   double end_sec = os::elapsedTime();
  5129   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  5130   if (non_young)
  5131     non_young_time_ms += elapsed_ms;
  5132   else
  5133     young_time_ms += elapsed_ms;
  5135   policy->record_young_free_cset_time_ms(young_time_ms);
  5136   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
  5139 // This routine is similar to the above but does not record
  5140 // any policy statistics or update free lists; we are abandoning
  5141 // the current incremental collection set in preparation for a
  5142 // full collection. After the full GC we will start to build up
  5143 // the incremental collection set again.
  5144 // This is only called when we're doing a full collection
  5145 // and is immediately followed by the tearing down of the young list.
  5147 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  5148   HeapRegion* cur = cs_head;
  5150   while (cur != NULL) {
  5151     HeapRegion* next = cur->next_in_collection_set();
  5152     assert(cur->in_collection_set(), "bad CS");
  5153     cur->set_next_in_collection_set(NULL);
  5154     cur->set_in_collection_set(false);
  5155     cur->set_young_index_in_cset(-1);
  5156     cur = next;
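       // The routines below manage the free and unclean region lists shared
       // with the concurrent zero-fill (ZF) thread; see the ZF_mon / Cleanup_mon
       // assertions in each routine for the locking protocol.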
  5160 HeapRegion*
  5161 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  5162   assert(ZF_mon->owned_by_self(), "Precondition");
  5163   HeapRegion* res = pop_unclean_region_list_locked();
  5164   if (res != NULL) {
  5165     assert(!res->continuesHumongous() &&
  5166            res->zero_fill_state() != HeapRegion::Allocated,
  5167            "Only free regions on unclean list.");
  5168     if (zero_filled) {
  5169       res->ensure_zero_filled_locked();
  5170       res->set_zero_fill_allocated();
  5173   return res;
  5176 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
  5177   MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
  5178   return alloc_region_from_unclean_list_locked(zero_filled);
  5181 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
  5182   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5183   put_region_on_unclean_list_locked(r);
  5184   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  5187 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
  5188   MutexLockerEx x(Cleanup_mon);
  5189   set_unclean_regions_coming_locked(b);
  5192 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  5193   assert(Cleanup_mon->owned_by_self(), "Precondition");
  5194   _unclean_regions_coming = b;
  5195   // Wake up mutator threads that might be waiting for completeCleanup to
  5196   // finish.
  5197   if (!b) Cleanup_mon->notify_all();
  5200 void G1CollectedHeap::wait_for_cleanup_complete() {
  5201   assert_not_at_safepoint();
  5202   MutexLockerEx x(Cleanup_mon);
  5203   wait_for_cleanup_complete_locked();
  5206 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  5207   assert(Cleanup_mon->owned_by_self(), "precondition");
  5208   while (_unclean_regions_coming) {
  5209     Cleanup_mon->wait();
  5213 void
  5214 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  5215   assert(ZF_mon->owned_by_self(), "precondition.");
  5216 #ifdef ASSERT
  5217   if (r->is_gc_alloc_region()) {
  5218     ResourceMark rm;
  5219     stringStream region_str;
  5220     print_on(&region_str);
  5221     assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
  5222                                              region_str.as_string()));
  5224 #endif
  5225   _unclean_region_list.insert_before_head(r);
  5228 void
  5229 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
  5230   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5231   prepend_region_list_on_unclean_list_locked(list);
  5232   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  5235 void
  5236 G1CollectedHeap::
  5237 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  5238   assert(ZF_mon->owned_by_self(), "precondition.");
  5239   _unclean_region_list.prepend_list(list);
  5242 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
  5243   assert(ZF_mon->owned_by_self(), "precondition.");
  5244   HeapRegion* res = _unclean_region_list.pop();
  5245   if (res != NULL) {
  5246     // Inform ZF thread that there's a new unclean head.
  5247     if (_unclean_region_list.hd() != NULL && should_zf())
  5248       ZF_mon->notify_all();
  5250   return res;
  5253 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  5254   assert(ZF_mon->owned_by_self(), "precondition.");
  5255   return _unclean_region_list.hd();
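       // If the head of the unclean list has already been zero-filled, move it
       // to the free list.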
  5259 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
  5260   assert(ZF_mon->owned_by_self(), "Precondition");
  5261   HeapRegion* r = peek_unclean_region_list_locked();
  5262   if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
  5263     // Result of below must be equal to "r", since we hold the lock.
  5264     (void)pop_unclean_region_list_locked();
  5265     put_free_region_on_list_locked(r);
  5266     return true;
  5267   } else {
  5268     return false;
  5272 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  5273   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5274   return move_cleaned_region_to_free_list_locked();
  5278 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  5279   assert(ZF_mon->owned_by_self(), "precondition.");
  5280   assert(_free_region_list_size == free_region_list_length(), "Inv");
  5281   assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
  5282         "Regions on free list must be zero filled");
  5283   assert(!r->isHumongous(), "Must not be humongous.");
  5284   assert(r->is_empty(), "Better be empty");
  5285   assert(!r->is_on_free_list(),
  5286          "Better not already be on free list");
  5287   assert(!r->is_on_unclean_list(),
  5288          "Better not already be on unclean list");
  5289   r->set_on_free_list(true);
  5290   r->set_next_on_free_list(_free_region_list);
  5291   _free_region_list = r;
  5292   _free_region_list_size++;
  5293   assert(_free_region_list_size == free_region_list_length(), "Inv");
  5296 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  5297   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5298   put_free_region_on_list_locked(r);
  5301 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  5302   assert(ZF_mon->owned_by_self(), "precondition.");
  5303   assert(_free_region_list_size == free_region_list_length(), "Inv");
  5304   HeapRegion* res = _free_region_list;
  5305   if (res != NULL) {
  5306     _free_region_list = res->next_from_free_list();
  5307     _free_region_list_size--;
  5308     res->set_on_free_list(false);
  5309     res->set_next_on_free_list(NULL);
  5310     assert(_free_region_list_size == free_region_list_length(), "Inv");
  5312   return res;
  5316 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  5317   // By self, or on behalf of self.
  5318   assert(Heap_lock->is_locked(), "Precondition");
  5319   HeapRegion* res = NULL;
  5320   bool first = true;
  5321   while (res == NULL) {
  5322     if (zero_filled || !first) {
  5323       MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5324       res = pop_free_region_list_locked();
  5325       if (res != NULL) {
  5326         assert(!res->zero_fill_is_allocated(),
  5327                "No allocated regions on free list.");
  5328         res->set_zero_fill_allocated();
  5329       } else if (!first) {
  5330         break;  // We tried both, time to return NULL.
  5334     if (res == NULL) {
  5335       res = alloc_region_from_unclean_list(zero_filled);
  5337     assert(res == NULL ||
  5338            !zero_filled ||
  5339            res->zero_fill_is_allocated(),
  5340            "We must have allocated the region we're returning");
  5341     first = false;
  5343   return res;
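       // Remove regions whose zero-fill state says they have been allocated
       // from both the unclean list and the free list, keeping the list-size
       // counters consistent.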
  5346 void G1CollectedHeap::remove_allocated_regions_from_lists() {
  5347   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5349     HeapRegion* prev = NULL;
  5350     HeapRegion* cur = _unclean_region_list.hd();
  5351     while (cur != NULL) {
  5352       HeapRegion* next = cur->next_from_unclean_list();
  5353       if (cur->zero_fill_is_allocated()) {
  5354         // Remove from the list.
  5355         if (prev == NULL) {
  5356           (void)_unclean_region_list.pop();
  5357         } else {
  5358           _unclean_region_list.delete_after(prev);
  5360         cur->set_on_unclean_list(false);
  5361         cur->set_next_on_unclean_list(NULL);
  5362       } else {
  5363         prev = cur;
  5365       cur = next;
  5367     assert(_unclean_region_list.sz() == unclean_region_list_length(),
  5368            "Inv");
  5372     HeapRegion* prev = NULL;
  5373     HeapRegion* cur = _free_region_list;
  5374     while (cur != NULL) {
  5375       HeapRegion* next = cur->next_from_free_list();
  5376       if (cur->zero_fill_is_allocated()) {
  5377         // Remove from the list.
  5378         if (prev == NULL) {
  5379           _free_region_list = cur->next_from_free_list();
  5380         } else {
  5381           prev->set_next_on_free_list(cur->next_from_free_list());
  5383         cur->set_on_free_list(false);
  5384         cur->set_next_on_free_list(NULL);
  5385         _free_region_list_size--;
  5386       } else {
  5387         prev = cur;
  5389       cur = next;
  5391     assert(_free_region_list_size == free_region_list_length(), "Inv");
  5395 bool G1CollectedHeap::verify_region_lists() {
  5396   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5397   return verify_region_lists_locked();
  5400 bool G1CollectedHeap::verify_region_lists_locked() {
  5401   HeapRegion* unclean = _unclean_region_list.hd();
  5402   while (unclean != NULL) {
  5403     guarantee(unclean->is_on_unclean_list(), "Well, it is!");
  5404     guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
  5405     guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
  5406               "Everything else is possible.");
  5407     unclean = unclean->next_from_unclean_list();
  5409   guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
  5411   HeapRegion* free_r = _free_region_list;
  5412   while (free_r != NULL) {
  5413     assert(free_r->is_on_free_list(), "Well, it is!");
  5414     assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
  5415     switch (free_r->zero_fill_state()) {
  5416     case HeapRegion::NotZeroFilled:
  5417     case HeapRegion::ZeroFilling:
  5418       guarantee(false, "Should not be on free list.");
  5419       break;
  5420     default:
  5421       // Everything else is possible.
  5422       break;
  5424     free_r = free_r->next_from_free_list();
  5426   guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  5427   // If we didn't do an assertion...
  5428   return true;
  5431 size_t G1CollectedHeap::free_region_list_length() {
  5432   assert(ZF_mon->owned_by_self(), "precondition.");
  5433   size_t len = 0;
  5434   HeapRegion* cur = _free_region_list;
  5435   while (cur != NULL) {
  5436     len++;
  5437     cur = cur->next_from_free_list();
  5439   return len;
  5442 size_t G1CollectedHeap::unclean_region_list_length() {
  5443   assert(ZF_mon->owned_by_self(), "precondition.");
  5444   return _unclean_region_list.length();
  5447 size_t G1CollectedHeap::n_regions() {
  5448   return _hrs->length();
  5451 size_t G1CollectedHeap::max_regions() {
  5452   return
  5453     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  5454     HeapRegion::GrainBytes;
  5457 size_t G1CollectedHeap::free_regions() {
  5458   /* Possibly-expensive assert.
  5459   assert(_free_regions == count_free_regions(),
  5460          "_free_regions is off.");
  5461   */
  5462   return _free_regions;
  5465 bool G1CollectedHeap::should_zf() {
  5466   return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
  5469 class RegionCounter: public HeapRegionClosure {
  5470   size_t _n;
  5471 public:
  5472   RegionCounter() : _n(0) {}
  5473   bool doHeapRegion(HeapRegion* r) {
  5474     if (r->is_empty()) {
  5475       assert(!r->isHumongous(), "H regions should not be empty.");
  5476       _n++;
  5478     return false;
  5480   int res() { return (int) _n; }
  5481 };
  5483 size_t G1CollectedHeap::count_free_regions() {
  5484   RegionCounter rc;
  5485   heap_region_iterate(&rc);
  5486   size_t n = rc.res();
  5487   if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
  5488     n--;
  5489   return n;
  5492 size_t G1CollectedHeap::count_free_regions_list() {
  5493   size_t n = 0;
  5494   size_t o = 0;
  5495   ZF_mon->lock_without_safepoint_check();
  5496   HeapRegion* cur = _free_region_list;
  5497   while (cur != NULL) {
  5498     cur = cur->next_from_free_list();
  5499     n++;
  5501   size_t m = unclean_region_list_length();
  5502   ZF_mon->unlock();
  5503   return n + m;
  5506 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  5507   assert(heap_lock_held_for_gc(),
  5508               "the heap lock should already be held by or for this thread");
  5509   _young_list->push_region(hr);
  5510   g1_policy()->set_region_short_lived(hr);
  5513 class NoYoungRegionsClosure: public HeapRegionClosure {
  5514 private:
  5515   bool _success;
  5516 public:
  5517   NoYoungRegionsClosure() : _success(true) { }
  5518   bool doHeapRegion(HeapRegion* r) {
  5519     if (r->is_young()) {
  5520       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  5521                              r->bottom(), r->end());
  5522       _success = false;
  5524     return false;
  5526   bool success() { return _success; }
  5527 };
  5529 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
  5530   bool ret = _young_list->check_list_empty(check_sample);
  5532   if (check_heap) {
  5533     NoYoungRegionsClosure closure;
  5534     heap_region_iterate(&closure);
  5535     ret = ret && closure.success();
  5538   return ret;
  5541 void G1CollectedHeap::empty_young_list() {
  5542   assert(heap_lock_held_for_gc(),
  5543               "the heap lock should already be held by or for this thread");
  5544   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
  5546   _young_list->empty_list();
  5549 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  5550   bool no_allocs = true;
  5551   for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
  5552     HeapRegion* r = _gc_alloc_regions[ap];
  5553     no_allocs = r == NULL || r->saved_mark_at_top();
  5555   return no_allocs;
  5558 void G1CollectedHeap::retire_all_alloc_regions() {
  5559   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  5560     HeapRegion* r = _gc_alloc_regions[ap];
  5561     if (r != NULL) {
  5562       // Check for aliases.
  5563       bool has_processed_alias = false;
  5564       for (int i = 0; i < ap; ++i) {
  5565         if (_gc_alloc_regions[i] == r) {
  5566           has_processed_alias = true;
  5567           break;
  5570       if (!has_processed_alias) {
  5571         retire_alloc_region(r, false /* par */);
  5578 // Done at the start of full GC.
  5579 void G1CollectedHeap::tear_down_region_lists() {
  5580   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5581   while (pop_unclean_region_list_locked() != NULL) ;
  5582   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
  5583          "Postconditions of loop.");
  5584   while (pop_free_region_list_locked() != NULL) ;
  5585   assert(_free_region_list == NULL, "Postcondition of loop.");
  5586   if (_free_region_list_size != 0) {
  5587     gclog_or_tty->print_cr("Size is "SIZE_FORMAT".", _free_region_list_size);
  5588     print_on(gclog_or_tty, true /* extended */);
  5590   assert(_free_region_list_size == 0, "Postconditions of loop.");
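       // Closure used by rebuild_region_lists() below: for regions that still
       // contain data, the space above top() is filled and the region is marked
       // zero-fill allocated; empty regions go back on the unclean or free list
       // according to their zero-fill state.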
  5594 class RegionResetter: public HeapRegionClosure {
  5595   G1CollectedHeap* _g1;
  5596   int _n;
  5597 public:
  5598   RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  5599   bool doHeapRegion(HeapRegion* r) {
  5600     if (r->continuesHumongous()) return false;
  5601     if (r->top() > r->bottom()) {
  5602       if (r->top() < r->end()) {
  5603         Copy::fill_to_words(r->top(),
  5604                           pointer_delta(r->end(), r->top()));
  5606       r->set_zero_fill_allocated();
  5607     } else {
  5608       assert(r->is_empty(), "tautology");
  5609       _n++;
  5610       switch (r->zero_fill_state()) {
  5611         case HeapRegion::NotZeroFilled:
  5612         case HeapRegion::ZeroFilling:
  5613           _g1->put_region_on_unclean_list_locked(r);
  5614           break;
  5615         case HeapRegion::Allocated:
  5616           r->set_zero_fill_complete();
  5617           // no break; go on to put on free list.
  5618         case HeapRegion::ZeroFilled:
  5619           _g1->put_free_region_on_list_locked(r);
  5620           break;
  5623     return false;
  5626   int getFreeRegionCount() {return _n;}
  5627 };
  5629 // Done at the end of full GC.
  5630 void G1CollectedHeap::rebuild_region_lists() {
  5631   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5632   // This needs to go at the end of the full GC.
  5633   RegionResetter rs;
  5634   heap_region_iterate(&rs);
  5635   _free_regions = rs.getFreeRegionCount();
  5636   // Tell the ZF thread it may have work to do.
  5637   if (should_zf()) ZF_mon->notify_all();
  5638 }
  5640 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  5641   G1CollectedHeap* _g1;
  5642   int _n;
  5643 public:
  5644   UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  5645   bool doHeapRegion(HeapRegion* r) {
  5646     if (r->continuesHumongous()) return false;
  5647     if (r->top() > r->bottom()) {
  5648       // There are assertions in "set_zero_fill_needed()" below that
  5649       // require top() == bottom(), so this is technically illegal.
  5650       // We'll skirt the law here, by making that true temporarily.
  5651       DEBUG_ONLY(HeapWord* save_top = r->top();
  5652                  r->set_top(r->bottom()));
  5653       r->set_zero_fill_needed();
  5654       DEBUG_ONLY(r->set_top(save_top));
  5655     }
  5656     return false;
  5657   }
  5658 };
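// [Editorial note, not in the original changeset] DEBUG_ONLY(...) expands to
// its argument only in debug (ASSERT) builds and to nothing in product
// builds, so the temporary set_top(bottom())/set_top(save_top) shuffle above
// exists solely to satisfy the debug-build assertions inside
// set_zero_fill_needed(); product builds call set_zero_fill_needed() with
// top() unchanged.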
  5660 // Done at the start of full GC.
  5661 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  5662   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5663   // This needs to be done at the start of the full GC.
  5664   UsedRegionsNeedZeroFillSetter rs;
  5665   heap_region_iterate(&rs);
  5666 }
  5668 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  5669   _refine_cte_cl->set_concurrent(concurrent);
  5670 }
  5672 #ifndef PRODUCT
  5674 class PrintHeapRegionClosure: public HeapRegionClosure {
  5675 public:
  5676   bool doHeapRegion(HeapRegion *r) {
  5677     gclog_or_tty->print("Region: "PTR_FORMAT":", r);
  5678     if (r != NULL) {
  5679       if (r->is_on_free_list())
  5680         gclog_or_tty->print("Free ");
  5681       if (r->is_young())
  5682         gclog_or_tty->print("Young ");
  5683       if (r->isHumongous())
  5684         gclog_or_tty->print("Is Humongous ");
  5685       r->print();
  5686     }
  5687     return false;
  5688   }
  5689 };
  5691 class SortHeapRegionClosure : public HeapRegionClosure {
  5692   size_t young_regions, free_regions, unclean_regions;
  5693   size_t hum_regions, count;
  5694   size_t unaccounted, cur_unclean, cur_alloc;
  5695   size_t total_free;
  5696   HeapRegion* cur;
  5697 public:
  5698   SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
  5699     free_regions(0), unclean_regions(0),
  5700     hum_regions(0),
  5701     count(0), unaccounted(0),
  5702     cur_unclean(0), cur_alloc(0), total_free(0)
  5703   {}
  5704   bool doHeapRegion(HeapRegion *r) {
  5705     count++;
  5706     if (r->is_on_free_list()) free_regions++;
  5707     else if (r->is_on_unclean_list()) unclean_regions++;
  5708     else if (r->isHumongous())  hum_regions++;
  5709     else if (r->is_young()) young_regions++;
  5710     else if (r == cur) cur_alloc++;
  5711     else unaccounted++;
  5712     return false;
  5713   }
  5714   void print() {
  5715     total_free = free_regions + unclean_regions;
  5716     gclog_or_tty->print("%d regions\n", count);
  5717     gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
  5718                         total_free, free_regions, unclean_regions);
  5719     gclog_or_tty->print("%d humongous %d young\n",
  5720                         hum_regions, young_regions);
  5721     gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
  5722     gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  5723   }
  5724 };
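// [Editorial note, not in the original changeset] SortHeapRegionClosure
// tallies each region into exactly one bucket (free, unclean, humongous,
// young, or the current allocation region); whatever is left over lands in
// "unaccounted", which print() flags with "UHOH". The counters are size_t, so
// the "%d" format specifiers above are technically mismatched on LP64 builds
// (SIZE_FORMAT would be exact); this closure and its callers live entirely
// inside the #ifndef PRODUCT block, i.e. non-product builds only.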
  5726 void G1CollectedHeap::print_region_counts() {
  5727   SortHeapRegionClosure sc(_cur_alloc_region);
  5728   PrintHeapRegionClosure cl;
  5729   heap_region_iterate(&cl);
  5730   heap_region_iterate(&sc);
  5731   sc.print();
  5732   print_region_accounting_info();
  5733 };
  5735 bool G1CollectedHeap::regions_accounted_for() {
  5736   // TODO: regions accounting for young/survivor/tenured
  5737   return true;
  5738 }
  5740 bool G1CollectedHeap::print_region_accounting_info() {
  5741   gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
  5742                          free_regions(),
  5743                          count_free_regions(), count_free_regions_list(),
  5744                          _free_region_list_size, _unclean_region_list.sz());
  5745   gclog_or_tty->print_cr("cur_alloc: %d.",
  5746                          (_cur_alloc_region == NULL ? 0 : 1));
  5747   gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
  5749   // TODO: check regions accounting for young/survivor/tenured
  5750   return true;
  5751 }
  5753 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5754   HeapRegion* hr = heap_region_containing(p);
  5755   if (hr == NULL) {
  5756     return is_in_permanent(p);
  5757   } else {
  5758     return hr->is_in(p);
  5759   }
  5760 }
  5761 #endif // !PRODUCT
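// [Editorial note, not in the original changeset] Everything between the
// "#ifndef PRODUCT" above and this #endif (the region printing and accounting
// helpers) is compiled only into non-product (debug/fastdebug) builds.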
  5763 void G1CollectedHeap::g1_unimplemented() {
  5764   // Unimplemented();
  5765 }
