src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author       johnc
date         Thu, 11 Jun 2009 17:19:33 -0700
changeset    1242:d44bdab1c03d
parent       1231:29e7d79232b9
child        1246:830ca2573896
permissions  -rw-r--r--

6843694: G1: assert(index < _vs.committed_size(),"bad index"), g1BlockOffsetTable.inline.hpp:55
Summary: For heaps larger than 32Gb, the number of heap regions overflows the data type used to hold the region index in the SparsePRT structure. Changed the region indexes, card indexes, and RSet hash table buckets to ints and added some size overflow guarantees.
Reviewed-by: ysr, tonyp
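
A minimal, standalone sketch of the overflow described in the summary, under the illustrative assumptions of 1 MB heap regions and a 16-bit region index (neither value is taken from this changeset): with those sizes, a heap beyond 32 GB produces more regions than a short can index, which is why the region and card indexes were widened to ints and guarded.

#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed values, for illustration only: 1 MB regions, a 33 GB heap.
  const uint64_t region_bytes = 1024 * 1024;
  const uint64_t heap_bytes   = 33ULL * 1024 * 1024 * 1024;
  const uint64_t num_regions  = heap_bytes / region_bytes;   // 33792 regions

  // A 16-bit region index wraps once there are more than 32767 regions...
  int16_t narrow_index = (int16_t)(num_regions - 1);         // typically wraps negative
  // ...while an int index has plenty of headroom.
  int32_t wide_index   = (int32_t)(num_regions - 1);

  printf("regions = %llu, narrow index = %d, wide index = %d\n",
         (unsigned long long)num_regions, (int)narrow_index, (int)wide_index);

  // Mirrors the "size overflow guarantees" mentioned in the summary:
  // fail loudly if the region count can no longer be held in an int,
  // rather than silently corrupting remembered-set indexes.
  assert(num_regions <= (uint64_t)INT_MAX && "region index must fit in an int");
  return 0;
}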

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_g1CollectedHeap.cpp.incl"
    28 // turn it on so that the contents of the young list (scan-only /
    29 // to-be-collected) are printed at "strategic" points before / during
    30 // / after the collection --- this is useful for debugging
    31 #define SCAN_ONLY_VERBOSE 0
    32 // CURRENT STATUS
    33 // This file is under construction.  Search for "FIXME".
    35 // INVARIANTS/NOTES
    36 //
    37 // All allocation activity covered by the G1CollectedHeap interface is
    38 //   serialized by acquiring the HeapLock.  This happens in
    39 //   mem_allocate_work, which all such allocation functions call.
    40 //   (Note that this does not apply to TLAB allocation, which is not part
    41 //   of this interface: it is done by clients of this interface.)
    43 // Local to this file.
    45 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    46   SuspendibleThreadSet* _sts;
    47   G1RemSet* _g1rs;
    48   ConcurrentG1Refine* _cg1r;
    49   bool _concurrent;
    50 public:
    51   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    52                               G1RemSet* g1rs,
    53                               ConcurrentG1Refine* cg1r) :
    54     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    55   {}
    56   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    57     _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
    58     if (_concurrent && _sts->should_yield()) {
    59       // Caller will actually yield.
    60       return false;
    61     }
    62     // Otherwise, we finished successfully; return true.
    63     return true;
    64   }
    65   void set_concurrent(bool b) { _concurrent = b; }
    66 };
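// ClearLoggedCardTableEntryClosure and RedirtyLoggedCardTableEntryClosure are
// debugging aids used by check_ct_logs_at_safepoint() below: the first cleans
// every logged card (recording a histogram of the card values it saw), the
// second re-dirties them, and the two call counts are compared to verify the
// card-logging mechanism.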
    69 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
    70   int _calls;
    71   G1CollectedHeap* _g1h;
    72   CardTableModRefBS* _ctbs;
    73   int _histo[256];
    74 public:
    75   ClearLoggedCardTableEntryClosure() :
    76     _calls(0)
    77   {
    78     _g1h = G1CollectedHeap::heap();
    79     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    80     for (int i = 0; i < 256; i++) _histo[i] = 0;
    81   }
    82   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    83     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
    84       _calls++;
    85       unsigned char* ujb = (unsigned char*)card_ptr;
    86       int ind = (int)(*ujb);
    87       _histo[ind]++;
    88       *card_ptr = -1;
    89     }
    90     return true;
    91   }
    92   int calls() { return _calls; }
    93   void print_histo() {
    94     gclog_or_tty->print_cr("Card table value histogram:");
    95     for (int i = 0; i < 256; i++) {
    96       if (_histo[i] != 0) {
    97         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
    98       }
    99     }
   100   }
   101 };
   103 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   104   int _calls;
   105   G1CollectedHeap* _g1h;
   106   CardTableModRefBS* _ctbs;
   107 public:
   108   RedirtyLoggedCardTableEntryClosure() :
   109     _calls(0)
   110   {
   111     _g1h = G1CollectedHeap::heap();
   112     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   113   }
   114   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   115     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   116       _calls++;
   117       *card_ptr = 0;
   118     }
   119     return true;
   120   }
   121   int calls() { return _calls; }
   122 };
   124 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   125 public:
   126   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   127     *card_ptr = CardTableModRefBS::dirty_card_val();
   128     return true;
   129   }
   130 };
   132 YoungList::YoungList(G1CollectedHeap* g1h)
   133   : _g1h(g1h), _head(NULL),
   134     _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
   135     _length(0), _scan_only_length(0),
   136     _last_sampled_rs_lengths(0),
   137     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
   138 {
   139   guarantee( check_list_empty(false), "just making sure..." );
   140 }
   142 void YoungList::push_region(HeapRegion *hr) {
   143   assert(!hr->is_young(), "should not already be young");
   144   assert(hr->get_next_young_region() == NULL, "cause it should!");
   146   hr->set_next_young_region(_head);
   147   _head = hr;
   149   hr->set_young();
   150   double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
   151   ++_length;
   152 }
   154 void YoungList::add_survivor_region(HeapRegion* hr) {
   155   assert(hr->is_survivor(), "should be flagged as survivor region");
   156   assert(hr->get_next_young_region() == NULL, "cause it should!");
   158   hr->set_next_young_region(_survivor_head);
   159   if (_survivor_head == NULL) {
   160     _survivor_tail = hr;
   161   }
   162   _survivor_head = hr;
   164   ++_survivor_length;
   165 }
   167 HeapRegion* YoungList::pop_region() {
   168   while (_head != NULL) {
   169     assert( length() > 0, "list should not be empty" );
   170     HeapRegion* ret = _head;
   171     _head = ret->get_next_young_region();
   172     ret->set_next_young_region(NULL);
   173     --_length;
   174     assert(ret->is_young(), "region should be very young");
   176     // Replace 'Survivor' region type with 'Young'. So the region will
   177     // be treated as a young region and will not be 'confused' with
   178     // newly created survivor regions.
   179     if (ret->is_survivor()) {
   180       ret->set_young();
   181     }
   183     if (!ret->is_scan_only()) {
   184       return ret;
   185     }
   187     // scan-only, we'll add it to the scan-only list
   188     if (_scan_only_tail == NULL) {
   189       guarantee( _scan_only_head == NULL, "invariant" );
   191       _scan_only_head = ret;
   192       _curr_scan_only = ret;
   193     } else {
   194       guarantee( _scan_only_head != NULL, "invariant" );
   195       _scan_only_tail->set_next_young_region(ret);
   196     }
   197     guarantee( ret->get_next_young_region() == NULL, "invariant" );
   198     _scan_only_tail = ret;
   200     // no need to be tagged as scan-only any more
   201     ret->set_young();
   203     ++_scan_only_length;
   204   }
   205   assert( length() == 0, "list should be empty" );
   206   return NULL;
   207 }
   209 void YoungList::empty_list(HeapRegion* list) {
   210   while (list != NULL) {
   211     HeapRegion* next = list->get_next_young_region();
   212     list->set_next_young_region(NULL);
   213     list->uninstall_surv_rate_group();
   214     list->set_not_young();
   215     list = next;
   216   }
   217 }
   219 void YoungList::empty_list() {
   220   assert(check_list_well_formed(), "young list should be well formed");
   222   empty_list(_head);
   223   _head = NULL;
   224   _length = 0;
   226   empty_list(_scan_only_head);
   227   _scan_only_head = NULL;
   228   _scan_only_tail = NULL;
   229   _scan_only_length = 0;
   230   _curr_scan_only = NULL;
   232   empty_list(_survivor_head);
   233   _survivor_head = NULL;
   234   _survivor_tail = NULL;
   235   _survivor_length = 0;
   237   _last_sampled_rs_lengths = 0;
   239   assert(check_list_empty(false), "just making sure...");
   240 }
   242 bool YoungList::check_list_well_formed() {
   243   bool ret = true;
   245   size_t length = 0;
   246   HeapRegion* curr = _head;
   247   HeapRegion* last = NULL;
   248   while (curr != NULL) {
   249     if (!curr->is_young() || curr->is_scan_only()) {
   250       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   251                              "incorrectly tagged (%d, %d)",
   252                              curr->bottom(), curr->end(),
   253                              curr->is_young(), curr->is_scan_only());
   254       ret = false;
   255     }
   256     ++length;
   257     last = curr;
   258     curr = curr->get_next_young_region();
   259   }
   260   ret = ret && (length == _length);
   262   if (!ret) {
   263     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   264     gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
   265                            length, _length);
   266   }
   268   bool scan_only_ret = true;
   269   length = 0;
   270   curr = _scan_only_head;
   271   last = NULL;
   272   while (curr != NULL) {
   273     if (!curr->is_young() || curr->is_scan_only()) {
   274       gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
   275                              "incorrectly tagged (%d, %d)",
   276                              curr->bottom(), curr->end(),
   277                              curr->is_young(), curr->is_scan_only());
   278       scan_only_ret = false;
   279     }
   280     ++length;
   281     last = curr;
   282     curr = curr->get_next_young_region();
   283   }
   284   scan_only_ret = scan_only_ret && (length == _scan_only_length);
   286   if ( (last != _scan_only_tail) ||
   287        (_scan_only_head == NULL && _scan_only_tail != NULL) ||
   288        (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
   289      gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
   290      scan_only_ret = false;
   291   }
   293   if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
   294     gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
   295     scan_only_ret = false;
   296    }
   298   if (!scan_only_ret) {
   299     gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
   300     gclog_or_tty->print_cr("###   list has %d entries, _scan_only_length is %d",
   301                   length, _scan_only_length);
   302   }
   304   return ret && scan_only_ret;
   305 }
   307 bool YoungList::check_list_empty(bool ignore_scan_only_list,
   308                                  bool check_sample) {
   309   bool ret = true;
   311   if (_length != 0) {
   312     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
   313                   _length);
   314     ret = false;
   315   }
   316   if (check_sample && _last_sampled_rs_lengths != 0) {
   317     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   318     ret = false;
   319   }
   320   if (_head != NULL) {
   321     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   322     ret = false;
   323   }
   324   if (!ret) {
   325     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   326   }
   328   if (ignore_scan_only_list)
   329     return ret;
   331   bool scan_only_ret = true;
   332   if (_scan_only_length != 0) {
   333     gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
   334                   _scan_only_length);
   335     scan_only_ret = false;
   336   }
   337   if (_scan_only_head != NULL) {
   338     gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
   339      scan_only_ret = false;
   340   }
   341   if (_scan_only_tail != NULL) {
   342     gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
   343     scan_only_ret = false;
   344   }
   345   if (!scan_only_ret) {
   346     gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
   347   }
   349   return ret && scan_only_ret;
   350 }
   352 void
   353 YoungList::rs_length_sampling_init() {
   354   _sampled_rs_lengths = 0;
   355   _curr               = _head;
   356 }
   358 bool
   359 YoungList::rs_length_sampling_more() {
   360   return _curr != NULL;
   361 }
   363 void
   364 YoungList::rs_length_sampling_next() {
   365   assert( _curr != NULL, "invariant" );
   366   _sampled_rs_lengths += _curr->rem_set()->occupied();
   367   _curr = _curr->get_next_young_region();
   368   if (_curr == NULL) {
   369     _last_sampled_rs_lengths = _sampled_rs_lengths;
   370     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   371   }
   372 }
   374 void
   375 YoungList::reset_auxilary_lists() {
   376   // We could have just "moved" the scan-only list to the young list.
   377   // However, the scan-only list is ordered according to the region
   378   // age in descending order, so, by moving one entry at a time, we
   379   // ensure that it is recreated in ascending order.
   381   guarantee( is_empty(), "young list should be empty" );
   382   assert(check_list_well_formed(), "young list should be well formed");
   384   // Add survivor regions to SurvRateGroup.
   385   _g1h->g1_policy()->note_start_adding_survivor_regions();
   386   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   387   for (HeapRegion* curr = _survivor_head;
   388        curr != NULL;
   389        curr = curr->get_next_young_region()) {
   390     _g1h->g1_policy()->set_region_survivors(curr);
   391   }
   392   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   394   if (_survivor_head != NULL) {
   395     _head           = _survivor_head;
   396     _length         = _survivor_length + _scan_only_length;
   397     _survivor_tail->set_next_young_region(_scan_only_head);
   398   } else {
   399     _head           = _scan_only_head;
   400     _length         = _scan_only_length;
   401   }
   403   for (HeapRegion* curr = _scan_only_head;
   404        curr != NULL;
   405        curr = curr->get_next_young_region()) {
   406     curr->recalculate_age_in_surv_rate_group();
   407   }
   408   _scan_only_head   = NULL;
   409   _scan_only_tail   = NULL;
   410   _scan_only_length = 0;
   411   _curr_scan_only   = NULL;
   413   _survivor_head    = NULL;
   414   _survivor_tail   = NULL;
   415   _survivor_length  = 0;
   416   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   418   assert(check_list_well_formed(), "young list should be well formed");
   419 }
   421 void YoungList::print() {
   422   HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
   423   const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
   425   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   426     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   427     HeapRegion *curr = lists[list];
   428     if (curr == NULL)
   429       gclog_or_tty->print_cr("  empty");
   430     while (curr != NULL) {
   431       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
   432                              "age: %4d, y: %d, s-o: %d, surv: %d",
   433                              curr->bottom(), curr->end(),
   434                              curr->top(),
   435                              curr->prev_top_at_mark_start(),
   436                              curr->next_top_at_mark_start(),
   437                              curr->top_at_conc_mark_count(),
   438                              curr->age_in_surv_rate_group_cond(),
   439                              curr->is_young(),
   440                              curr->is_scan_only(),
   441                              curr->is_survivor());
   442       curr = curr->get_next_young_region();
   443     }
   444   }
   446   gclog_or_tty->print_cr("");
   447 }
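// The dirty cards region list used by the next two functions is a lock-free
// stack threaded through each region's next_dirty_cards_region pointer: a
// NULL link means "not on the list", so the last element points to itself
// rather than to NULL, and push/pop maintain the list with cmpxchg loops.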
   449 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   450 {
   451   // Claim the right to put the region on the dirty cards region list
   452   // by installing a self pointer.
   453   HeapRegion* next = hr->get_next_dirty_cards_region();
   454   if (next == NULL) {
   455     HeapRegion* res = (HeapRegion*)
   456       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   457                           NULL);
   458     if (res == NULL) {
   459       HeapRegion* head;
   460       do {
   461         // Put the region to the dirty cards region list.
   462         head = _dirty_cards_region_list;
   463         next = (HeapRegion*)
   464           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   465         if (next == head) {
   466           assert(hr->get_next_dirty_cards_region() == hr,
   467                  "hr->get_next_dirty_cards_region() != hr");
   468           if (next == NULL) {
   469             // The last region in the list points to itself.
   470             hr->set_next_dirty_cards_region(hr);
   471           } else {
   472             hr->set_next_dirty_cards_region(next);
   473           }
   474         }
   475       } while (next != head);
   476     }
   477   }
   478 }
   480 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   481 {
   482   HeapRegion* head;
   483   HeapRegion* hr;
   484   do {
   485     head = _dirty_cards_region_list;
   486     if (head == NULL) {
   487       return NULL;
   488     }
   489     HeapRegion* new_head = head->get_next_dirty_cards_region();
   490     if (head == new_head) {
   491       // The last region.
   492       new_head = NULL;
   493     }
   494     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   495                                           head);
   496   } while (hr != head);
   497   assert(hr != NULL, "invariant");
   498   hr->set_next_dirty_cards_region(NULL);
   499   return hr;
    500   return hr;
    501 }
   502 void G1CollectedHeap::stop_conc_gc_threads() {
   503   _cg1r->stop();
   504   _czft->stop();
   505   _cmThread->stop();
   506 }
   509 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   510   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   511   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   513   // Count the dirty cards at the start.
   514   CountNonCleanMemRegionClosure count1(this);
   515   ct_bs->mod_card_iterate(&count1);
   516   int orig_count = count1.n();
   518   // First clear the logged cards.
   519   ClearLoggedCardTableEntryClosure clear;
   520   dcqs.set_closure(&clear);
   521   dcqs.apply_closure_to_all_completed_buffers();
   522   dcqs.iterate_closure_all_threads(false);
   523   clear.print_histo();
    525   // Now ensure that there are no dirty cards.
   526   CountNonCleanMemRegionClosure count2(this);
   527   ct_bs->mod_card_iterate(&count2);
   528   if (count2.n() != 0) {
   529     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   530                            count2.n(), orig_count);
   531   }
   532   guarantee(count2.n() == 0, "Card table should be clean.");
   534   RedirtyLoggedCardTableEntryClosure redirty;
   535   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   536   dcqs.apply_closure_to_all_completed_buffers();
   537   dcqs.iterate_closure_all_threads(false);
   538   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   539                          clear.calls(), orig_count);
   540   guarantee(redirty.calls() == clear.calls(),
   541             "Or else mechanism is broken.");
   543   CountNonCleanMemRegionClosure count3(this);
   544   ct_bs->mod_card_iterate(&count3);
   545   if (count3.n() != orig_count) {
   546     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   547                            orig_count, count3.n());
   548     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   549   }
   551   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   552 }
   554 // Private class members.
   556 G1CollectedHeap* G1CollectedHeap::_g1h;
   558 // Private methods.
   560 // Finds a HeapRegion that can be used to allocate a given size of block.
   563 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
   564                                                  bool do_expand,
   565                                                  bool zero_filled) {
   566   ConcurrentZFThread::note_region_alloc();
   567   HeapRegion* res = alloc_free_region_from_lists(zero_filled);
   568   if (res == NULL && do_expand) {
   569     expand(word_size * HeapWordSize);
   570     res = alloc_free_region_from_lists(zero_filled);
   571     assert(res == NULL ||
   572            (!res->isHumongous() &&
   573             (!zero_filled ||
   574              res->zero_fill_state() == HeapRegion::Allocated)),
   575            "Alloc Regions must be zero filled (and non-H)");
   576   }
   577   if (res != NULL && res->is_empty()) _free_regions--;
   578   assert(res == NULL ||
   579          (!res->isHumongous() &&
   580           (!zero_filled ||
   581            res->zero_fill_state() == HeapRegion::Allocated)),
   582          "Non-young alloc Regions must be zero filled (and non-H)");
   584   if (G1PrintRegions) {
   585     if (res != NULL) {
   586       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
   587                              "top "PTR_FORMAT,
   588                              res->hrs_index(), res->bottom(), res->end(), res->top());
   589     }
   590   }
   592   return res;
   593 }
   595 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
   596                                                          size_t word_size,
   597                                                          bool zero_filled) {
   598   HeapRegion* alloc_region = NULL;
   599   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   600     alloc_region = newAllocRegion_work(word_size, true, zero_filled);
   601     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   602       alloc_region->set_survivor();
   603     }
   604     ++_gc_alloc_region_counts[purpose];
   605   } else {
   606     g1_policy()->note_alloc_region_limit_reached(purpose);
   607   }
   608   return alloc_region;
   609 }
   611 // If could fit into free regions w/o expansion, try.
   612 // Otherwise, if can expand, do so.
   613 // Otherwise, if using ex regions might help, try with ex given back.
   614 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
   615   assert(regions_accounted_for(), "Region leakage!");
   617   // We can't allocate H regions while cleanupComplete is running, since
   618   // some of the regions we find to be empty might not yet be added to the
   619   // unclean list.  (If we're already at a safepoint, this call is
   620   // unnecessary, not to mention wrong.)
   621   if (!SafepointSynchronize::is_at_safepoint())
   622     wait_for_cleanup_complete();
   624   size_t num_regions =
   625     round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
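  // That is, the number of whole regions needed to cover word_size, rounded
  // up: an object one and a half regions long, for example, gets two regions.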
   627   // Special case if < one region???
   629   // Remember the ft size.
   630   size_t x_size = expansion_regions();
   632   HeapWord* res = NULL;
   633   bool eliminated_allocated_from_lists = false;
   635   // Can the allocation potentially fit in the free regions?
   636   if (free_regions() >= num_regions) {
   637     res = _hrs->obj_allocate(word_size);
   638   }
   639   if (res == NULL) {
   640     // Try expansion.
   641     size_t fs = _hrs->free_suffix();
   642     if (fs + x_size >= num_regions) {
   643       expand((num_regions - fs) * HeapRegion::GrainBytes);
   644       res = _hrs->obj_allocate(word_size);
   645       assert(res != NULL, "This should have worked.");
   646     } else {
   647       // Expansion won't help.  Are there enough free regions if we get rid
   648       // of reservations?
   649       size_t avail = free_regions();
   650       if (avail >= num_regions) {
   651         res = _hrs->obj_allocate(word_size);
   652         if (res != NULL) {
   653           remove_allocated_regions_from_lists();
   654           eliminated_allocated_from_lists = true;
   655         }
   656       }
   657     }
   658   }
   659   if (res != NULL) {
   660     // Increment by the number of regions allocated.
   661     // FIXME: Assumes regions all of size GrainBytes.
   662 #ifndef PRODUCT
   663     mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
   664                                            HeapRegion::GrainWords));
   665 #endif
   666     if (!eliminated_allocated_from_lists)
   667       remove_allocated_regions_from_lists();
   668     _summary_bytes_used += word_size * HeapWordSize;
   669     _free_regions -= num_regions;
   670     _num_humongous_regions += (int) num_regions;
   671   }
   672   assert(regions_accounted_for(), "Region Leakage");
   673   return res;
   674 }
   676 HeapWord*
   677 G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   678                                          bool permit_collection_pause) {
   679   HeapWord* res = NULL;
   680   HeapRegion* allocated_young_region = NULL;
   682   assert( SafepointSynchronize::is_at_safepoint() ||
   683           Heap_lock->owned_by_self(), "pre condition of the call" );
   685   if (isHumongous(word_size)) {
   686     // Allocation of a humongous object can, in a sense, complete a
   687     // partial region, if the previous alloc was also humongous, and
   688     // caused the test below to succeed.
   689     if (permit_collection_pause)
   690       do_collection_pause_if_appropriate(word_size);
   691     res = humongousObjAllocate(word_size);
   692     assert(_cur_alloc_region == NULL
   693            || !_cur_alloc_region->isHumongous(),
   694            "Prevent a regression of this bug.");
   696   } else {
   697     // We may have concurrent cleanup working at the time. Wait for it
   698     // to complete. In the future we would probably want to make the
   699     // concurrent cleanup truly concurrent by decoupling it from the
   700     // allocation.
   701     if (!SafepointSynchronize::is_at_safepoint())
   702       wait_for_cleanup_complete();
   703     // If we do a collection pause, this will be reset to a non-NULL
   704     // value.  If we don't, nulling here ensures that we allocate a new
   705     // region below.
   706     if (_cur_alloc_region != NULL) {
   707       // We're finished with the _cur_alloc_region.
   708       _summary_bytes_used += _cur_alloc_region->used();
   709       _cur_alloc_region = NULL;
   710     }
   711     assert(_cur_alloc_region == NULL, "Invariant.");
   712     // Completion of a heap region is perhaps a good point at which to do
   713     // a collection pause.
   714     if (permit_collection_pause)
   715       do_collection_pause_if_appropriate(word_size);
   716     // Make sure we have an allocation region available.
   717     if (_cur_alloc_region == NULL) {
   718       if (!SafepointSynchronize::is_at_safepoint())
   719         wait_for_cleanup_complete();
   720       bool next_is_young = should_set_young_locked();
   721       // If the next region is not young, make sure it's zero-filled.
   722       _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
   723       if (_cur_alloc_region != NULL) {
   724         _summary_bytes_used -= _cur_alloc_region->used();
   725         if (next_is_young) {
   726           set_region_short_lived_locked(_cur_alloc_region);
   727           allocated_young_region = _cur_alloc_region;
   728         }
   729       }
   730     }
   731     assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
   732            "Prevent a regression of this bug.");
   734     // Now retry the allocation.
   735     if (_cur_alloc_region != NULL) {
   736       res = _cur_alloc_region->allocate(word_size);
   737     }
   738   }
   740   // NOTE: fails frequently in PRT
   741   assert(regions_accounted_for(), "Region leakage!");
   743   if (res != NULL) {
   744     if (!SafepointSynchronize::is_at_safepoint()) {
   745       assert( permit_collection_pause, "invariant" );
   746       assert( Heap_lock->owned_by_self(), "invariant" );
   747       Heap_lock->unlock();
   748     }
   750     if (allocated_young_region != NULL) {
   751       HeapRegion* hr = allocated_young_region;
   752       HeapWord* bottom = hr->bottom();
   753       HeapWord* end = hr->end();
   754       MemRegion mr(bottom, end);
   755       ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
   756     }
   757   }
   759   assert( SafepointSynchronize::is_at_safepoint() ||
   760           (res == NULL && Heap_lock->owned_by_self()) ||
   761           (res != NULL && !Heap_lock->owned_by_self()),
   762           "post condition of the call" );
   764   return res;
   765 }
   767 HeapWord*
   768 G1CollectedHeap::mem_allocate(size_t word_size,
   769                               bool   is_noref,
   770                               bool   is_tlab,
   771                               bool* gc_overhead_limit_was_exceeded) {
   772   debug_only(check_for_valid_allocation_state());
   773   assert(no_gc_in_progress(), "Allocation during gc not allowed");
   774   HeapWord* result = NULL;
    776   // Loop until the allocation is satisfied,
   777   // or unsatisfied after GC.
   778   for (int try_count = 1; /* return or throw */; try_count += 1) {
   779     int gc_count_before;
   780     {
   781       Heap_lock->lock();
   782       result = attempt_allocation(word_size);
   783       if (result != NULL) {
   784         // attempt_allocation should have unlocked the heap lock
   785         assert(is_in(result), "result not in heap");
   786         return result;
   787       }
   788       // Read the gc count while the heap lock is held.
   789       gc_count_before = SharedHeap::heap()->total_collections();
   790       Heap_lock->unlock();
   791     }
   793     // Create the garbage collection operation...
   794     VM_G1CollectForAllocation op(word_size,
   795                                  gc_count_before);
   797     // ...and get the VM thread to execute it.
   798     VMThread::execute(&op);
   799     if (op.prologue_succeeded()) {
   800       result = op.result();
   801       assert(result == NULL || is_in(result), "result not in heap");
   802       return result;
   803     }
   805     // Give a warning if we seem to be looping forever.
   806     if ((QueuedAllocationWarningCount > 0) &&
   807         (try_count % QueuedAllocationWarningCount == 0)) {
   808       warning("G1CollectedHeap::mem_allocate_work retries %d times",
   809               try_count);
   810     }
   811   }
   812 }
   814 void G1CollectedHeap::abandon_cur_alloc_region() {
   815   if (_cur_alloc_region != NULL) {
   816     // We're finished with the _cur_alloc_region.
   817     if (_cur_alloc_region->is_empty()) {
   818       _free_regions++;
   819       free_region(_cur_alloc_region);
   820     } else {
   821       _summary_bytes_used += _cur_alloc_region->used();
   822     }
   823     _cur_alloc_region = NULL;
   824   }
   825 }
   827 void G1CollectedHeap::abandon_gc_alloc_regions() {
   828   // first, make sure that the GC alloc region list is empty (it should!)
   829   assert(_gc_alloc_region_list == NULL, "invariant");
   830   release_gc_alloc_regions(true /* totally */);
   831 }
   833 class PostMCRemSetClearClosure: public HeapRegionClosure {
   834   ModRefBarrierSet* _mr_bs;
   835 public:
   836   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   837   bool doHeapRegion(HeapRegion* r) {
   838     r->reset_gc_time_stamp();
   839     if (r->continuesHumongous())
   840       return false;
   841     HeapRegionRemSet* hrrs = r->rem_set();
   842     if (hrrs != NULL) hrrs->clear();
   843     // You might think here that we could clear just the cards
   844     // corresponding to the used region.  But no: if we leave a dirty card
   845     // in a region we might allocate into, then it would prevent that card
   846     // from being enqueued, and cause it to be missed.
   847     // Re: the performance cost: we shouldn't be doing full GC anyway!
   848     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
   849     return false;
   850   }
   851 };
   854 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
   855   ModRefBarrierSet* _mr_bs;
   856 public:
   857   PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   858   bool doHeapRegion(HeapRegion* r) {
   859     if (r->continuesHumongous()) return false;
   860     if (r->used_region().word_size() != 0) {
   861       _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
   862     }
   863     return false;
   864   }
   865 };
   867 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   868   G1CollectedHeap*   _g1h;
   869   UpdateRSOopClosure _cl;
   870   int                _worker_i;
   871 public:
   872   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
   873     _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
   874     _worker_i(worker_i),
   875     _g1h(g1)
   876   { }
   877   bool doHeapRegion(HeapRegion* r) {
   878     if (!r->continuesHumongous()) {
   879       _cl.set_from(r);
   880       r->oop_iterate(&_cl);
   881     }
   882     return false;
   883   }
   884 };
   886 class ParRebuildRSTask: public AbstractGangTask {
   887   G1CollectedHeap* _g1;
   888 public:
   889   ParRebuildRSTask(G1CollectedHeap* g1)
   890     : AbstractGangTask("ParRebuildRSTask"),
   891       _g1(g1)
   892   { }
   894   void work(int i) {
   895     RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
   896     _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
   897                                          HeapRegion::RebuildRSClaimValue);
   898   }
   899 };
   901 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
   902                                     size_t word_size) {
   903   ResourceMark rm;
   905   if (full && DisableExplicitGC) {
   906     gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
   907     return;
   908   }
   910   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   911   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
   913   if (GC_locker::is_active()) {
   914     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   915   }
   917   {
   918     IsGCActiveMark x;
   920     // Timing
   921     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   922     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   923     TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
   925     double start = os::elapsedTime();
   926     GCOverheadReporter::recordSTWStart(start);
   927     g1_policy()->record_full_collection_start();
   929     gc_prologue(true);
   930     increment_total_collections();
   932     size_t g1h_prev_used = used();
   933     assert(used() == recalculate_used(), "Should be equal");
   935     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
   936       HandleMark hm;  // Discard invalid handles created during verification
   937       prepare_for_verify();
   938       gclog_or_tty->print(" VerifyBeforeGC:");
   939       Universe::verify(true);
   940     }
   941     assert(regions_accounted_for(), "Region leakage!");
   943     COMPILER2_PRESENT(DerivedPointerTable::clear());
   945     // We want to discover references, but not process them yet.
   946     // This mode is disabled in
   947     // instanceRefKlass::process_discovered_references if the
   948     // generation does some collection work, or
   949     // instanceRefKlass::enqueue_discovered_references if the
   950     // generation returns without doing any work.
   951     ref_processor()->disable_discovery();
   952     ref_processor()->abandon_partial_discovery();
   953     ref_processor()->verify_no_references_recorded();
   955     // Abandon current iterations of concurrent marking and concurrent
   956     // refinement, if any are in progress.
   957     concurrent_mark()->abort();
   959     // Make sure we'll choose a new allocation region afterwards.
   960     abandon_cur_alloc_region();
   961     abandon_gc_alloc_regions();
   962     assert(_cur_alloc_region == NULL, "Invariant.");
   963     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
   964     tear_down_region_lists();
   965     set_used_regions_to_need_zero_fill();
   966     if (g1_policy()->in_young_gc_mode()) {
   967       empty_young_list();
   968       g1_policy()->set_full_young_gcs(true);
   969     }
   971     // Temporarily make reference _discovery_ single threaded (non-MT).
   972     ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
   974     // Temporarily make refs discovery atomic
   975     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
   977     // Temporarily clear _is_alive_non_header
   978     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
   980     ref_processor()->enable_discovery();
   981     ref_processor()->setup_policy(clear_all_soft_refs);
   983     // Do collection work
   984     {
   985       HandleMark hm;  // Discard invalid handles created during gc
   986       G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
   987     }
   988     // Because freeing humongous regions may have added some unclean
   989     // regions, it is necessary to tear down again before rebuilding.
   990     tear_down_region_lists();
   991     rebuild_region_lists();
   993     _summary_bytes_used = recalculate_used();
   995     ref_processor()->enqueue_discovered_references();
   997     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
   999     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  1000       HandleMark hm;  // Discard invalid handles created during verification
  1001       gclog_or_tty->print(" VerifyAfterGC:");
  1002       prepare_for_verify();
   1003       Universe::verify(false);
   1004     }
  1005     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  1007     reset_gc_time_stamp();
  1008     // Since everything potentially moved, we will clear all remembered
   1009     // sets, and clear all cards.  Later we will rebuild remembered
  1010     // sets. We will also reset the GC time stamps of the regions.
  1011     PostMCRemSetClearClosure rs_clear(mr_bs());
  1012     heap_region_iterate(&rs_clear);
  1014     // Resize the heap if necessary.
  1015     resize_if_necessary_after_full_collection(full ? 0 : word_size);
  1017     if (_cg1r->use_cache()) {
  1018       _cg1r->clear_and_record_card_counts();
   1019       _cg1r->clear_hot_cache();
   1020     }
  1022     // Rebuild remembered sets of all regions.
  1023     if (ParallelGCThreads > 0) {
  1024       ParRebuildRSTask rebuild_rs_task(this);
  1025       assert(check_heap_region_claim_values(
  1026              HeapRegion::InitialClaimValue), "sanity check");
  1027       set_par_threads(workers()->total_workers());
  1028       workers()->run_task(&rebuild_rs_task);
  1029       set_par_threads(0);
  1030       assert(check_heap_region_claim_values(
  1031              HeapRegion::RebuildRSClaimValue), "sanity check");
  1032       reset_heap_region_claim_values();
  1033     } else {
  1034       RebuildRSOutOfRegionClosure rebuild_rs(this);
   1035       heap_region_iterate(&rebuild_rs);
   1036     }
  1038     if (PrintGC) {
   1039       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
   1040     }
  1042     if (true) { // FIXME
  1043       // Ask the permanent generation to adjust size for full collections
   1044       perm()->compute_new_size();
   1045     }
  1047     double end = os::elapsedTime();
  1048     GCOverheadReporter::recordSTWEnd(end);
  1049     g1_policy()->record_full_collection_end();
  1051 #ifdef TRACESPINNING
  1052     ParallelTaskTerminator::print_termination_counts();
  1053 #endif
  1055     gc_epilogue(true);
  1057     // Discard all rset updates
  1058     JavaThread::dirty_card_queue_set().abandon_logs();
  1059     assert(!G1DeferredRSUpdate
  1060            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
   1061     assert(regions_accounted_for(), "Region leakage!");
   1062   }
  1064   if (g1_policy()->in_young_gc_mode()) {
  1065     _young_list->reset_sampled_info();
  1066     assert( check_young_list_empty(false, false),
   1067             "young list should be empty at this point");
   1068   }
   1069 }
  1071 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
   1072   do_collection(true, clear_all_soft_refs, 0);
   1073 }
  1075 // This code is mostly copied from TenuredGeneration.
  1076 void
  1077 G1CollectedHeap::
  1078 resize_if_necessary_after_full_collection(size_t word_size) {
  1079   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1081   // Include the current allocation, if any, and bytes that will be
  1082   // pre-allocated to support collections, as "used".
  1083   const size_t used_after_gc = used();
  1084   const size_t capacity_after_gc = capacity();
  1085   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1087   // We don't have floating point command-line arguments
  1088   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  1089   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1090   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  1091   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1093   size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  1094   size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
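  // Worked example (illustrative values): with MinHeapFreeRatio == 40 the
  // maximum used percentage is 0.6, so 600M used after GC yields a minimum
  // desired capacity of 600M / 0.6 = 1000M.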
  1096   // Don't shrink less than the initial size.
  1097   minimum_desired_capacity =
  1098     MAX2(minimum_desired_capacity,
  1099          collector_policy()->initial_heap_byte_size());
  1100   maximum_desired_capacity =
  1101     MAX2(maximum_desired_capacity,
  1102          collector_policy()->initial_heap_byte_size());
  1104   // We are failing here because minimum_desired_capacity is
  1105   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  1106   assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");
  1108   if (PrintGC && Verbose) {
  1109     const double free_percentage = ((double)free_after_gc) / capacity();
  1110     gclog_or_tty->print_cr("Computing new size after full GC ");
  1111     gclog_or_tty->print_cr("  "
  1112                            "  minimum_free_percentage: %6.2f",
  1113                            minimum_free_percentage);
  1114     gclog_or_tty->print_cr("  "
  1115                            "  maximum_free_percentage: %6.2f",
  1116                            maximum_free_percentage);
  1117     gclog_or_tty->print_cr("  "
  1118                            "  capacity: %6.1fK"
  1119                            "  minimum_desired_capacity: %6.1fK"
  1120                            "  maximum_desired_capacity: %6.1fK",
  1121                            capacity() / (double) K,
  1122                            minimum_desired_capacity / (double) K,
  1123                            maximum_desired_capacity / (double) K);
  1124     gclog_or_tty->print_cr("  "
  1125                            "   free_after_gc   : %6.1fK"
  1126                            "   used_after_gc   : %6.1fK",
  1127                            free_after_gc / (double) K,
  1128                            used_after_gc / (double) K);
  1129     gclog_or_tty->print_cr("  "
  1130                            "   free_percentage: %6.2f",
   1131                            free_percentage);
   1132   }
  1133   if (capacity() < minimum_desired_capacity) {
  1134     // Don't expand unless it's significant
  1135     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1136     expand(expand_bytes);
  1137     if (PrintGC && Verbose) {
  1138       gclog_or_tty->print_cr("    expanding:"
  1139                              "  minimum_desired_capacity: %6.1fK"
  1140                              "  expand_bytes: %6.1fK",
  1141                              minimum_desired_capacity / (double) K,
   1142                              expand_bytes / (double) K);
   1143     }
  1145     // No expansion, now see if we want to shrink
  1146   } else if (capacity() > maximum_desired_capacity) {
  1147     // Capacity too large, compute shrinking size
  1148     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1149     shrink(shrink_bytes);
  1150     if (PrintGC && Verbose) {
  1151       gclog_or_tty->print_cr("  "
  1152                              "  shrinking:"
  1153                              "  initSize: %.1fK"
  1154                              "  maximum_desired_capacity: %.1fK",
  1155                              collector_policy()->initial_heap_byte_size() / (double) K,
  1156                              maximum_desired_capacity / (double) K);
  1157       gclog_or_tty->print_cr("  "
  1158                              "  shrink_bytes: %.1fK",
   1159                              shrink_bytes / (double) K);
   1160     }
   1161   }
   1162 }
  1165 HeapWord*
  1166 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  1167   HeapWord* result = NULL;
  1169   // In a G1 heap, we're supposed to keep allocation from failing by
  1170   // incremental pauses.  Therefore, at least for now, we'll favor
  1171   // expansion over collection.  (This might change in the future if we can
  1172   // do something smarter than full collection to satisfy a failed alloc.)
  1174   result = expand_and_allocate(word_size);
  1175   if (result != NULL) {
  1176     assert(is_in(result), "result not in heap");
   1177     return result;
   1178   }
  1180   // OK, I guess we have to try collection.
  1182   do_collection(false, false, word_size);
  1184   result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  1186   if (result != NULL) {
  1187     assert(is_in(result), "result not in heap");
   1188     return result;
   1189   }
  1191   // Try collecting soft references.
  1192   do_collection(false, true, word_size);
  1193   result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  1194   if (result != NULL) {
  1195     assert(is_in(result), "result not in heap");
   1196     return result;
   1197   }
  1199   // What else?  We might try synchronous finalization later.  If the total
  1200   // space available is large enough for the allocation, then a more
  1201   // complete compaction phase than we've tried so far might be
  1202   // appropriate.
   1203   return NULL;
   1204 }
  1206 // Attempting to expand the heap sufficiently
  1207 // to support an allocation of the given "word_size".  If
  1208 // successful, perform the allocation and return the address of the
  1209 // allocated block, or else "NULL".
  1211 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1212   size_t expand_bytes = word_size * HeapWordSize;
  1213   if (expand_bytes < MinHeapDeltaBytes) {
   1214     expand_bytes = MinHeapDeltaBytes;
   1215   }
  1216   expand(expand_bytes);
  1217   assert(regions_accounted_for(), "Region leakage!");
  1218   HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
   1219   return result;
   1220 }
  1222 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  1223   size_t pre_used = 0;
  1224   size_t cleared_h_regions = 0;
  1225   size_t freed_regions = 0;
  1226   UncleanRegionList local_list;
  1227   free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
  1228                                     freed_regions, &local_list);
  1230   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  1231                           &local_list);
   1232   return pre_used;
   1233 }
  1235 void
  1236 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
  1237                                                    size_t& pre_used,
  1238                                                    size_t& cleared_h,
  1239                                                    size_t& freed_regions,
  1240                                                    UncleanRegionList* list,
  1241                                                    bool par) {
  1242   assert(!hr->continuesHumongous(), "should have filtered these out");
  1243   size_t res = 0;
  1244   if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
  1245       !hr->is_young()) {
  1246     if (G1PolicyVerbose > 0)
  1247       gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
  1248                                                                                " during cleanup", hr, hr->used());
   1249     free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
   1250   }
   1251 }
  1253 // FIXME: both this and shrink could probably be more efficient by
  1254 // doing one "VirtualSpace::expand_by" call rather than several.
  1255 void G1CollectedHeap::expand(size_t expand_bytes) {
  1256   size_t old_mem_size = _g1_storage.committed_size();
  1257   // We expand by a minimum of 1K.
  1258   expand_bytes = MAX2(expand_bytes, (size_t)K);
  1259   size_t aligned_expand_bytes =
  1260     ReservedSpace::page_align_size_up(expand_bytes);
  1261   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1262                                        HeapRegion::GrainBytes);
  1263   expand_bytes = aligned_expand_bytes;
  1264   while (expand_bytes > 0) {
  1265     HeapWord* base = (HeapWord*)_g1_storage.high();
  1266     // Commit more storage.
  1267     bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
  1268     if (!successful) {
  1269         expand_bytes = 0;
  1270     } else {
  1271       expand_bytes -= HeapRegion::GrainBytes;
  1272       // Expand the committed region.
  1273       HeapWord* high = (HeapWord*) _g1_storage.high();
  1274       _g1_committed.set_end(high);
  1275       // Create a new HeapRegion.
  1276       MemRegion mr(base, high);
  1277       bool is_zeroed = !_g1_max_committed.contains(base);
  1278       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
  1280       // Now update max_committed if necessary.
  1281       _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
  1283       // Add it to the HeapRegionSeq.
  1284       _hrs->insert(hr);
  1285       // Set the zero-fill state, according to whether it's already
   1286       // zeroed.
   1287       {
  1288         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  1289         if (is_zeroed) {
  1290           hr->set_zero_fill_complete();
  1291           put_free_region_on_list_locked(hr);
  1292         } else {
  1293           hr->set_zero_fill_needed();
   1294           put_region_on_unclean_list_locked(hr);
   1295         }
   1296       }
  1297       _free_regions++;
  1298       // And we used up an expansion region to create it.
  1299       _expansion_regions--;
  1300       // Tell the cardtable about it.
  1301       Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1302       // And the offset table as well.
   1303       _bot_shared->resize(_g1_committed.word_size());
   1304     }
   1305   }
  1306   if (Verbose && PrintGC) {
  1307     size_t new_mem_size = _g1_storage.committed_size();
  1308     gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
  1309                            old_mem_size/K, aligned_expand_bytes/K,
   1310                            new_mem_size/K);
   1311   }
   1312 }
   1314 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
   1315 {
  1316   size_t old_mem_size = _g1_storage.committed_size();
  1317   size_t aligned_shrink_bytes =
  1318     ReservedSpace::page_align_size_down(shrink_bytes);
  1319   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1320                                          HeapRegion::GrainBytes);
  1321   size_t num_regions_deleted = 0;
  1322   MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
  1324   assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1325   if (mr.byte_size() > 0)
  1326     _g1_storage.shrink_by(mr.byte_size());
  1327   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1329   _g1_committed.set_end(mr.start());
  1330   _free_regions -= num_regions_deleted;
  1331   _expansion_regions += num_regions_deleted;
  1333   // Tell the cardtable about it.
  1334   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1336   // And the offset table as well.
  1337   _bot_shared->resize(_g1_committed.word_size());
  1339   HeapRegionRemSet::shrink_heap(n_regions());
  1341   if (Verbose && PrintGC) {
  1342     size_t new_mem_size = _g1_storage.committed_size();
  1343     gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
  1344                            old_mem_size/K, aligned_shrink_bytes/K,
   1345                            new_mem_size/K);
   1346   }
   1347 }
  1349 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1350   release_gc_alloc_regions(true /* totally */);
  1351   tear_down_region_lists();  // We will rebuild them in a moment.
  1352   shrink_helper(shrink_bytes);
   1353   rebuild_region_lists();
   1354 }
  1356 // Public methods.
  1358 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1359 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1360 #endif // _MSC_VER
  1363 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1364   SharedHeap(policy_),
  1365   _g1_policy(policy_),
  1366   _ref_processor(NULL),
  1367   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1368   _bot_shared(NULL),
  1369   _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  1370   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1371   _evac_failure_scan_stack(NULL) ,
  1372   _mark_in_progress(false),
  1373   _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  1374   _cur_alloc_region(NULL),
  1375   _refine_cte_cl(NULL),
  1376   _free_region_list(NULL), _free_region_list_size(0),
  1377   _free_regions(0),
  1378   _full_collection(false),
  1379   _unclean_region_list(),
  1380   _unclean_regions_coming(false),
  1381   _young_list(new YoungList(this)),
  1382   _gc_time_stamp(0),
  1383   _surviving_young_words(NULL),
  1384   _in_cset_fast_test(NULL),
  1385   _in_cset_fast_test_base(NULL),
  1386   _dirty_cards_region_list(NULL) {
  1387   _g1h = this; // To catch bugs.
  1388   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
   1389     vm_exit_during_initialization("Failed necessary allocation.");
   1390   }
  1391   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1392   _task_queues = new RefToScanQueueSet(n_queues);
  1394   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1395   assert(n_rem_sets > 0, "Invariant.");
  1397   HeapRegionRemSetIterator** iter_arr =
  1398     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  1399   for (int i = 0; i < n_queues; i++) {
   1400     iter_arr[i] = new HeapRegionRemSetIterator();
   1401   }
  1402   _rem_set_iterator = iter_arr;
  1404   for (int i = 0; i < n_queues; i++) {
  1405     RefToScanQueue* q = new RefToScanQueue();
  1406     q->initialize();
  1407     _task_queues->register_queue(i, q);
  1410   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  1411     _gc_alloc_regions[ap]          = NULL;
  1412     _gc_alloc_region_counts[ap]    = 0;
  1413     _retained_gc_alloc_regions[ap] = NULL;
  1414     // by default, we do not retain a GC alloc region for each ap;
  1415     // we'll override this, when appropriate, below
  1416     _retain_gc_alloc_region[ap]    = false;
  1419   // We will try to remember the last half-full tenured region we
  1420   // allocated to at the end of a collection so that we can re-use it
  1421   // during the next collection.
  1422   _retain_gc_alloc_region[GCAllocForTenured]  = true;
  1424   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1427 jint G1CollectedHeap::initialize() {
  1428   os::enable_vtime();
  1430   // Necessary to satisfy locking discipline assertions.
  1432   MutexLocker x(Heap_lock);
  1434   // While there are no constraints in the GC code that HeapWordSize
  1435   // be any particular value, there are multiple other areas in the
  1436   // system which believe this to be true (e.g. oop->object_size in some
  1437   // cases incorrectly returns the size in wordSize units rather than
  1438   // HeapWordSize).
  1439   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1441   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1442   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1444   // Ensure that the sizes are properly aligned.
  1445   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1446   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1448   // We allocate this in any case, but it will do no work if the command
  1449   // line parameter is off.
  1450   _cg1r = new ConcurrentG1Refine();
  1452   // Reserve the maximum.
  1453   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1454   // Includes the perm-gen.
  1456   const size_t total_reserved = max_byte_size + pgs->max_size();
  1457   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  1459   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
  1460                         HeapRegion::GrainBytes,
  1461                         false /*ism*/, addr);
  1463   if (UseCompressedOops) {
  1464     if (addr != NULL && !heap_rs.is_reserved()) {
  1465       // Failed to reserve at specified address - the requested memory
  1466       // region is taken already, for example, by 'java' launcher.
  1467       // Try again to reserve the heap higher.
  1468       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  1469       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  1470                              false /*ism*/, addr);
  1471       if (addr != NULL && !heap_rs0.is_reserved()) {
  1472         // Failed to reserve at specified address again - give up.
  1473         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  1474         assert(addr == NULL, "");
  1475         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  1476                                false /*ism*/, addr);
  1477         heap_rs = heap_rs1;
  1478       } else {
  1479         heap_rs = heap_rs0;
  1484   if (!heap_rs.is_reserved()) {
  1485     vm_exit_during_initialization("Could not reserve enough space for object heap");
  1486     return JNI_ENOMEM;
  1489   // It is important to do this in a way such that concurrent readers can't
  1490   // temporarily think something is in the heap.  (I've actually seen this
  1491   // happen in asserts: DLD.)
  1492   _reserved.set_word_size(0);
  1493   _reserved.set_start((HeapWord*)heap_rs.base());
  1494   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  1496   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  1498   _num_humongous_regions = 0;
  1500   // Create the gen rem set (and barrier set) for the entire reserved region.
  1501   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  1502   set_barrier_set(rem_set()->bs());
  1503   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  1504     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  1505   } else {
  1506     vm_exit_during_initialization("G1 requires a mod ref bs.");
  1507     return JNI_ENOMEM;
  1510   // Also create a G1 rem set.
  1511   if (G1UseHRIntoRS) {
  1512     if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  1513       _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
  1514     } else {
  1515       vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  1516       return JNI_ENOMEM;
  1518   } else {
  1519     _g1_rem_set = new StupidG1RemSet(this);
  1522   // Carve out the G1 part of the heap.
  1524   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  1525   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  1526                            g1_rs.size()/HeapWordSize);
  1527   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  1529   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  1531   _g1_storage.initialize(g1_rs, 0);
  1532   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  1533   _g1_max_committed = _g1_committed;
  1534   _hrs = new HeapRegionSeq(_expansion_regions);
  1535   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  1536   guarantee(_cur_alloc_region == NULL, "from constructor");
  1538   // 6843694 - ensure that the maximum region index can fit
  1539   // in the remembered set structures.
  1540   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  1541   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  1543   const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
  1544   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  1545   guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
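       // The bound used above is the largest positive value of a signed type
       // of the given width: (1 << (8 * sizeof(T) - 1)) - 1.  As a purely
       // illustrative example, if RegionIdx_t were a 16-bit type the limit
       // would be 2^15 - 1 = 32767 region indexes (with, say, 1M regions that
       // covers just under 32G of heap), and a 16-bit CardIdx_t with the
       // usual 2^9 = 512-byte cards would cap GrainBytes below
       // 32767 * 512 bytes, i.e. just under 16M.  The actual widths come from
       // the typedefs in the remembered set code.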
  1547   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  1548                                              heap_word_size(init_byte_size));
  1550   _g1h = this;
  1552   // Create the ConcurrentMark data structure and thread.
  1553   // (Must do this late, so that "max_regions" is defined.)
  1554   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  1555   _cmThread = _cm->cmThread();
  1557   // ...and the concurrent zero-fill thread, if necessary.
  1558   if (G1ConcZeroFill) {
  1559     _czft = new ConcurrentZFThread();
  1562   // Initialize the from_card cache structure of HeapRegionRemSet.
  1563   HeapRegionRemSet::init_heap(max_regions());
  1565   // Now expand into the initial heap size.
  1566   expand(init_byte_size);
  1568   // Perform any initialization actions delegated to the policy.
  1569   g1_policy()->init();
  1571   g1_policy()->note_start_of_mark_thread();
  1573   _refine_cte_cl =
  1574     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  1575                                     g1_rem_set(),
  1576                                     concurrent_g1_refine());
  1577   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  1579   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  1580                                                SATB_Q_FL_lock,
  1581                                                0,
  1582                                                Shared_SATB_Q_lock);
  1584   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1585                                                 DirtyCardQ_FL_lock,
  1586                                                 G1DirtyCardQueueMax,
  1587                                                 Shared_DirtyCardQ_lock);
  1589   if (G1DeferredRSUpdate) {
  1590     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1591                                       DirtyCardQ_FL_lock,
  1592                                       0,
  1593                                       Shared_DirtyCardQ_lock,
  1594                                       &JavaThread::dirty_card_queue_set());
  1596   // In case we're keeping closure specialization stats, initialize those
  1597   // counts and that mechanism.
  1598   SpecializationStats::clear();
  1600   _gc_alloc_region_list = NULL;
  1602   // Do later initialization work for concurrent refinement.
  1603   _cg1r->init();
  1605   const char* group_names[] = { "CR", "ZF", "CM", "CL" };
  1606   GCOverheadReporter::initGCOverheadReporter(4, group_names);
  1608   return JNI_OK;
  1611 void G1CollectedHeap::ref_processing_init() {
  1612   SharedHeap::ref_processing_init();
  1613   MemRegion mr = reserved_region();
  1614   _ref_processor = ReferenceProcessor::create_ref_processor(
  1615                                          mr,    // span
  1616                                          false, // Reference discovery is not atomic
  1617                                                 // (though it shouldn't matter here.)
  1618                                          true,  // mt_discovery
  1619                                          NULL,  // is alive closure: need to fill this in for efficiency
  1620                                          ParallelGCThreads,
  1621                                          ParallelRefProcEnabled,
  1622                                          true); // Setting next fields of discovered
  1623                                                 // lists requires a barrier.
  1626 size_t G1CollectedHeap::capacity() const {
  1627   return _g1_committed.byte_size();
  1630 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
  1631                                                  int worker_i) {
  1632   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1633   int n_completed_buffers = 0;
  1634   while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
  1635     n_completed_buffers++;
  1637   g1_policy()->record_update_rs_processed_buffers(worker_i,
  1638                                                   (double) n_completed_buffers);
  1639   dcqs.clear_n_completed_buffers();
  1640   // Finish up the queue...
  1641   if (worker_i == 0) concurrent_g1_refine()->clean_up_cache(worker_i,
  1642                                                             g1_rem_set());
  1643   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  1647 // Computes the sum of the storage used by the various regions.
  1649 size_t G1CollectedHeap::used() const {
  1650   assert(Heap_lock->owner() != NULL,
  1651          "Should be owned on this thread's behalf.");
  1652   size_t result = _summary_bytes_used;
  1653   if (_cur_alloc_region != NULL)
  1654     result += _cur_alloc_region->used();
  1655   return result;
  1658 class SumUsedClosure: public HeapRegionClosure {
  1659   size_t _used;
  1660 public:
  1661   SumUsedClosure() : _used(0) {}
  1662   bool doHeapRegion(HeapRegion* r) {
  1663     if (!r->continuesHumongous()) {
  1664       _used += r->used();
  1666     return false;
  1668   size_t result() { return _used; }
  1669 };
  1671 size_t G1CollectedHeap::recalculate_used() const {
  1672   SumUsedClosure blk;
  1673   _hrs->iterate(&blk);
  1674   return blk.result();
  1677 #ifndef PRODUCT
  1678 class SumUsedRegionsClosure: public HeapRegionClosure {
  1679   size_t _num;
  1680 public:
  1681   SumUsedRegionsClosure() : _num(0) {}
  1682   bool doHeapRegion(HeapRegion* r) {
  1683     if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
  1684       _num += 1;
  1686     return false;
  1688   size_t result() { return _num; }
  1689 };
  1691 size_t G1CollectedHeap::recalculate_used_regions() const {
  1692   SumUsedRegionsClosure blk;
  1693   _hrs->iterate(&blk);
  1694   return blk.result();
  1696 #endif // PRODUCT
  1698 size_t G1CollectedHeap::unsafe_max_alloc() {
  1699   if (_free_regions > 0) return HeapRegion::GrainBytes;
  1700   // otherwise, is there space in the current allocation region?
  1702   // We need to store the current allocation region in a local variable
  1703   // here. The problem is that this method doesn't take any locks and
  1704   // there may be other threads which overwrite the current allocation
  1705   // region field. attempt_allocation(), for example, sets it to NULL
  1706   // and this can happen *after* the NULL check here but before the call
  1707   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  1708   // to be a problem in the optimized build, since the two loads of the
  1709   // current allocation region field are optimized away.
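       // A rough sketch of the race being avoided (illustrative only):
       //
       //   if (_cur_alloc_region != NULL)        // field read #1: non-NULL
       //     return _cur_alloc_region->free();   // field read #2: may be NULL now
       //
       // Reading the field once into a local, as below, makes the NULL check
       // and the use operate on the same value.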
  1710   HeapRegion* car = _cur_alloc_region;
  1712   // FIXME: should iterate over all regions?
  1713   if (car == NULL) {
  1714     return 0;
  1716   return car->free();
  1719 void G1CollectedHeap::collect(GCCause::Cause cause) {
  1720   // The caller doesn't have the Heap_lock
  1721   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  1722   MutexLocker ml(Heap_lock);
  1723   collect_locked(cause);
  1726 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  1727   assert(Thread::current()->is_VM_thread(), "Precondition#1");
  1728   assert(Heap_lock->is_locked(), "Precondition#2");
  1729   GCCauseSetter gcs(this, cause);
  1730   switch (cause) {
  1731     case GCCause::_heap_inspection:
  1732     case GCCause::_heap_dump: {
  1733       HandleMark hm;
  1734       do_full_collection(false);         // don't clear all soft refs
  1735       break;
  1737     default: // XXX FIX ME
  1738       ShouldNotReachHere(); // Unexpected use of this function
  1743 void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
  1744   // Don't want to do a GC until cleanup is completed.
  1745   wait_for_cleanup_complete();
  1747   // Read the GC count while holding the Heap_lock
  1748   int gc_count_before = SharedHeap::heap()->total_collections();
  1750     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
  1751     VM_G1CollectFull op(gc_count_before, cause);
  1752     VMThread::execute(&op);
  1756 bool G1CollectedHeap::is_in(const void* p) const {
  1757   if (_g1_committed.contains(p)) {
  1758     HeapRegion* hr = _hrs->addr_to_region(p);
  1759     return hr->is_in(p);
  1760   } else {
  1761     return _perm_gen->as_gen()->is_in(p);
  1765 // Iteration functions.
  1767 // Iterates an OopClosure over all ref-containing fields of objects
  1768 // within a HeapRegion.
  1770 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  1771   MemRegion _mr;
  1772   OopClosure* _cl;
  1773 public:
  1774   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  1775     : _mr(mr), _cl(cl) {}
  1776   bool doHeapRegion(HeapRegion* r) {
  1777     if (! r->continuesHumongous()) {
  1778       r->oop_iterate(_cl);
  1780     return false;
  1782 };
  1784 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  1785   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  1786   _hrs->iterate(&blk);
  1787   if (do_perm) {
  1788     perm_gen()->oop_iterate(cl);
  1792 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  1793   IterateOopClosureRegionClosure blk(mr, cl);
  1794   _hrs->iterate(&blk);
  1795   if (do_perm) {
  1796     perm_gen()->oop_iterate(cl);
  1800 // Iterates an ObjectClosure over all objects within a HeapRegion.
  1802 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  1803   ObjectClosure* _cl;
  1804 public:
  1805   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  1806   bool doHeapRegion(HeapRegion* r) {
  1807     if (! r->continuesHumongous()) {
  1808       r->object_iterate(_cl);
  1810     return false;
  1812 };
  1814 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  1815   IterateObjectClosureRegionClosure blk(cl);
  1816   _hrs->iterate(&blk);
  1817   if (do_perm) {
  1818     perm_gen()->object_iterate(cl);
  1822 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  1823   // FIXME: is this right?
  1824   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  1827 // Calls a SpaceClosure on a HeapRegion.
  1829 class SpaceClosureRegionClosure: public HeapRegionClosure {
  1830   SpaceClosure* _cl;
  1831 public:
  1832   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  1833   bool doHeapRegion(HeapRegion* r) {
  1834     _cl->do_space(r);
  1835     return false;
  1837 };
  1839 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  1840   SpaceClosureRegionClosure blk(cl);
  1841   _hrs->iterate(&blk);
  1844 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  1845   _hrs->iterate(cl);
  1848 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  1849                                                HeapRegionClosure* cl) {
  1850   _hrs->iterate_from(r, cl);
  1853 void
  1854 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  1855   _hrs->iterate_from(idx, cl);
  1858 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
  1860 void
  1861 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  1862                                                  int worker,
  1863                                                  jint claim_value) {
  1864   const size_t regions = n_regions();
  1865   const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
  1866   // try to spread out the starting points of the workers
  1867   const size_t start_index = regions / worker_num * (size_t) worker;
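       // For example (illustrative numbers): with 100 regions and 4 workers,
       // worker 2 starts at index 100 / 4 * 2 = 50; the modular walk below
       // still visits all 100 regions, wrapping from index 99 back to 0.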
  1869   // each worker will actually look at all regions
  1870   for (size_t count = 0; count < regions; ++count) {
  1871     const size_t index = (start_index + count) % regions;
  1872     assert(0 <= index && index < regions, "sanity");
  1873     HeapRegion* r = region_at(index);
  1874     // we'll ignore "continues humongous" regions (we'll process them
  1875     // when we come across their corresponding "start humongous"
  1876     // region) and regions already claimed
  1877     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  1878       continue;
  1880     // OK, try to claim it
  1881     if (r->claimHeapRegion(claim_value)) {
  1882       // success!
  1883       assert(!r->continuesHumongous(), "sanity");
  1884       if (r->startsHumongous()) {
  1885         // If the region is "starts humongous" we'll iterate over its
  1886         // "continues humongous" regions first. The order is important:
  1887         // in one case, calling the closure on the "starts humongous"
  1888         // region might de-allocate and clear all its "continues
  1889         // humongous" regions and, as a result, we might end up
  1890         // processing them twice. So, we'll do them first (notice: most
  1891         // closures will ignore them anyway) and then we'll do the
  1892         // "starts humongous" region.
  1893         for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  1894           HeapRegion* chr = region_at(ch_index);
  1896           // if the region has already been claimed or it's not
  1897           // "continues humongous" we're done
  1898           if (chr->claim_value() == claim_value ||
  1899               !chr->continuesHumongous()) {
  1900             break;
  1903           // No one should have claimed it directly. We can assert that,
  1904           // given that we claimed its "starts humongous" region.
  1905           assert(chr->claim_value() != claim_value, "sanity");
  1906           assert(chr->humongous_start_region() == r, "sanity");
  1908           if (chr->claimHeapRegion(claim_value)) {
  1909             // we should always be able to claim it; no one else should
  1910             // be trying to claim this region
  1912             bool res2 = cl->doHeapRegion(chr);
  1913             assert(!res2, "Should not abort");
  1915             // Right now, this holds (i.e., no closure that actually
  1916             // does something with "continues humongous" regions
  1917             // clears them). We might have to weaken it in the future,
  1918             // but let's leave these two asserts here for extra safety.
  1919             assert(chr->continuesHumongous(), "should still be the case");
  1920             assert(chr->humongous_start_region() == r, "sanity");
  1921           } else {
  1922             guarantee(false, "we should not reach here");
  1927       assert(!r->continuesHumongous(), "sanity");
  1928       bool res = cl->doHeapRegion(r);
  1929       assert(!res, "Should not abort");
  1934 class ResetClaimValuesClosure: public HeapRegionClosure {
  1935 public:
  1936   bool doHeapRegion(HeapRegion* r) {
  1937     r->set_claim_value(HeapRegion::InitialClaimValue);
  1938     return false;
  1940 };
  1942 void
  1943 G1CollectedHeap::reset_heap_region_claim_values() {
  1944   ResetClaimValuesClosure blk;
  1945   heap_region_iterate(&blk);
  1948 #ifdef ASSERT
  1949 // This checks whether all regions in the heap have the correct claim
  1950 // value. A check is also piggy-backed on this to ensure that the
  1951 // humongous_start_region() information on "continues humongous"
  1952 // regions is correct.
  1954 class CheckClaimValuesClosure : public HeapRegionClosure {
  1955 private:
  1956   jint _claim_value;
  1957   size_t _failures;
  1958   HeapRegion* _sh_region;
  1959 public:
  1960   CheckClaimValuesClosure(jint claim_value) :
  1961     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  1962   bool doHeapRegion(HeapRegion* r) {
  1963     if (r->claim_value() != _claim_value) {
  1964       gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  1965                              "claim value = %d, should be %d",
  1966                              r->bottom(), r->end(), r->claim_value(),
  1967                              _claim_value);
  1968       ++_failures;
  1970     if (!r->isHumongous()) {
  1971       _sh_region = NULL;
  1972     } else if (r->startsHumongous()) {
  1973       _sh_region = r;
  1974     } else if (r->continuesHumongous()) {
  1975       if (r->humongous_start_region() != _sh_region) {
  1976         gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  1977                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  1978                                r->bottom(), r->end(),
  1979                                r->humongous_start_region(),
  1980                                _sh_region);
  1981         ++_failures;
  1984     return false;
  1986   size_t failures() {
  1987     return _failures;
  1989 };
  1991 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  1992   CheckClaimValuesClosure cl(claim_value);
  1993   heap_region_iterate(&cl);
  1994   return cl.failures() == 0;
  1996 #endif // ASSERT
  1998 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  1999   HeapRegion* r = g1_policy()->collection_set();
  2000   while (r != NULL) {
  2001     HeapRegion* next = r->next_in_collection_set();
  2002     if (cl->doHeapRegion(r)) {
  2003       cl->incomplete();
  2004       return;
  2006     r = next;
  2010 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2011                                                   HeapRegionClosure *cl) {
  2012   assert(r->in_collection_set(),
  2013          "Start region must be a member of the collection set.");
  2014   HeapRegion* cur = r;
  2015   while (cur != NULL) {
  2016     HeapRegion* next = cur->next_in_collection_set();
  2017     if (cl->doHeapRegion(cur) && false) {
  2018       cl->incomplete();
  2019       return;
  2021     cur = next;
  2023   cur = g1_policy()->collection_set();
  2024   while (cur != r) {
  2025     HeapRegion* next = cur->next_in_collection_set();
  2026     if (cl->doHeapRegion(cur) && false) {
  2027       cl->incomplete();
  2028       return;
  2030     cur = next;
  2034 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2035   return _hrs->length() > 0 ? _hrs->at(0) : NULL;
  2039 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2040   Space* res = heap_region_containing(addr);
  2041   if (res == NULL)
  2042     res = perm_gen()->space_containing(addr);
  2043   return res;
  2046 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2047   Space* sp = space_containing(addr);
  2048   if (sp != NULL) {
  2049     return sp->block_start(addr);
  2051   return NULL;
  2054 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2055   Space* sp = space_containing(addr);
  2056   assert(sp != NULL, "block_size of address outside of heap");
  2057   return sp->block_size(addr);
  2060 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2061   Space* sp = space_containing(addr);
  2062   return sp->block_is_obj(addr);
  2065 bool G1CollectedHeap::supports_tlab_allocation() const {
  2066   return true;
  2069 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2070   return HeapRegion::GrainBytes;
  2073 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2074   // Return the remaining space in the cur alloc region, but not less than
  2075   // the min TLAB size.
  2076   // Also, no more than half the region size, since we can't allow tlabs to
  2077   // grow big enough to accommodate humongous objects.
  2079   // We need to store it locally, since it might change between when we
  2080   // test for NULL and when we use it later.
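       // As an illustration of the clamping below (assuming GrainBytes/2 is
       // at least MinTLABSize): if the region has 0.7 * GrainBytes free, the
       // result is GrainBytes/2; if it has less free space than MinTLABSize,
       // the result is MinTLABSize.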
  2081   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  2082   if (cur_alloc_space == NULL) {
  2083     return HeapRegion::GrainBytes/2;
  2084   } else {
  2085     return MAX2(MIN2(cur_alloc_space->free(),
  2086                      (size_t)(HeapRegion::GrainBytes/2)),
  2087                 (size_t)MinTLABSize);
  2091 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
  2092   bool dummy;
  2093   return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
  2096 bool G1CollectedHeap::allocs_are_zero_filled() {
  2097   return false;
  2100 size_t G1CollectedHeap::large_typearray_limit() {
  2101   // FIXME
  2102   return HeapRegion::GrainBytes/HeapWordSize;
  2105 size_t G1CollectedHeap::max_capacity() const {
  2106   return _g1_committed.byte_size();
  2109 jlong G1CollectedHeap::millis_since_last_gc() {
  2110   // assert(false, "NYI");
  2111   return 0;
  2115 void G1CollectedHeap::prepare_for_verify() {
  2116   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2117     ensure_parsability(false);
  2119   g1_rem_set()->prepare_for_verify();
  2122 class VerifyLivenessOopClosure: public OopClosure {
  2123   G1CollectedHeap* g1h;
  2124 public:
  2125   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
  2126     g1h = _g1h;
  2128   void do_oop(narrowOop *p) {
  2129     guarantee(false, "NYI");
  2131   void do_oop(oop *p) {
  2132     oop obj = *p;
  2133     assert(obj == NULL || !g1h->is_obj_dead(obj),
  2134            "Dead object referenced by a not dead object");
  2136 };
  2138 class VerifyObjsInRegionClosure: public ObjectClosure {
  2139   G1CollectedHeap* _g1h;
  2140   size_t _live_bytes;
  2141   HeapRegion *_hr;
  2142 public:
  2143   VerifyObjsInRegionClosure(HeapRegion *hr) : _live_bytes(0), _hr(hr) {
  2144     _g1h = G1CollectedHeap::heap();
  2146   void do_object(oop o) {
  2147     VerifyLivenessOopClosure isLive(_g1h);
  2148     assert(o != NULL, "Huh?");
  2149     if (!_g1h->is_obj_dead(o)) {
  2150       o->oop_iterate(&isLive);
  2151       if (!_hr->obj_allocated_since_prev_marking(o))
  2152         _live_bytes += (o->size() * HeapWordSize);
  2155   size_t live_bytes() { return _live_bytes; }
  2156 };
  2158 class PrintObjsInRegionClosure : public ObjectClosure {
  2159   HeapRegion *_hr;
  2160   G1CollectedHeap *_g1;
  2161 public:
  2162   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  2163     _g1 = G1CollectedHeap::heap();
  2164   };
  2166   void do_object(oop o) {
  2167     if (o != NULL) {
  2168       HeapWord *start = (HeapWord *) o;
  2169       size_t word_sz = o->size();
  2170       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  2171                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  2172                           (void*) o, word_sz,
  2173                           _g1->isMarkedPrev(o),
  2174                           _g1->isMarkedNext(o),
  2175                           _hr->obj_allocated_since_prev_marking(o));
  2176       HeapWord *end = start + word_sz;
  2177       HeapWord *cur;
  2178       int *val;
  2179       for (cur = start; cur < end; cur++) {
  2180         val = (int *) cur;
  2181         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  2185 };
  2187 class VerifyRegionClosure: public HeapRegionClosure {
  2188 public:
  2189   bool _allow_dirty;
  2190   bool _par;
  2191   VerifyRegionClosure(bool allow_dirty, bool par = false)
  2192     : _allow_dirty(allow_dirty), _par(par) {}
  2193   bool doHeapRegion(HeapRegion* r) {
  2194     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  2195               "Should be unclaimed at verify points.");
  2196     if (!r->continuesHumongous()) {
  2197       VerifyObjsInRegionClosure not_dead_yet_cl(r);
  2198       r->verify(_allow_dirty);
  2199       r->object_iterate(&not_dead_yet_cl);
  2200       guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
  2201                 "More live objects than counted in last complete marking.");
  2203     return false;
  2205 };
  2207 class VerifyRootsClosure: public OopsInGenClosure {
  2208 private:
  2209   G1CollectedHeap* _g1h;
  2210   bool             _failures;
  2212 public:
  2213   VerifyRootsClosure() :
  2214     _g1h(G1CollectedHeap::heap()), _failures(false) { }
  2216   bool failures() { return _failures; }
  2218   void do_oop(narrowOop* p) {
  2219     guarantee(false, "NYI");
  2222   void do_oop(oop* p) {
  2223     oop obj = *p;
  2224     if (obj != NULL) {
  2225       if (_g1h->is_obj_dead(obj)) {
  2226         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  2227                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
  2228         obj->print_on(gclog_or_tty);
  2229         _failures = true;
  2233 };
  2235 // This is the task used for parallel heap verification.
  2237 class G1ParVerifyTask: public AbstractGangTask {
  2238 private:
  2239   G1CollectedHeap* _g1h;
  2240   bool _allow_dirty;
  2242 public:
  2243   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
  2244     AbstractGangTask("Parallel verify task"),
  2245     _g1h(g1h), _allow_dirty(allow_dirty) { }
  2247   void work(int worker_i) {
  2248     HandleMark hm;
  2249     VerifyRegionClosure blk(_allow_dirty, true);
  2250     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
  2251                                           HeapRegion::ParVerifyClaimValue);
  2253 };
  2255 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  2256   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2257     if (!silent) { gclog_or_tty->print("roots "); }
  2258     VerifyRootsClosure rootsCl;
  2259     process_strong_roots(false,
  2260                          SharedHeap::SO_AllClasses,
  2261                          &rootsCl,
  2262                          &rootsCl);
  2263     rem_set()->invalidate(perm_gen()->used_region(), false);
  2264     if (!silent) { gclog_or_tty->print("heapRegions "); }
  2265     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2266       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2267              "sanity check");
  2269       G1ParVerifyTask task(this, allow_dirty);
  2270       int n_workers = workers()->total_workers();
  2271       set_par_threads(n_workers);
  2272       workers()->run_task(&task);
  2273       set_par_threads(0);
  2275       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  2276              "sanity check");
  2278       reset_heap_region_claim_values();
  2280       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2281              "sanity check");
  2282     } else {
  2283       VerifyRegionClosure blk(allow_dirty);
  2284       _hrs->iterate(&blk);
  2286     if (!silent) gclog_or_tty->print("remset ");
  2287     rem_set()->verify();
  2288     guarantee(!rootsCl.failures(), "should not have had failures");
  2289   } else {
  2290     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  2294 class PrintRegionClosure: public HeapRegionClosure {
  2295   outputStream* _st;
  2296 public:
  2297   PrintRegionClosure(outputStream* st) : _st(st) {}
  2298   bool doHeapRegion(HeapRegion* r) {
  2299     r->print_on(_st);
  2300     return false;
  2302 };
  2304 void G1CollectedHeap::print() const { print_on(gclog_or_tty); }
  2306 void G1CollectedHeap::print_on(outputStream* st) const {
  2307   PrintRegionClosure blk(st);
  2308   _hrs->iterate(&blk);
  2311 class PrintOnThreadsClosure : public ThreadClosure {
  2312   outputStream* _st;
  2313 public:
  2314   PrintOnThreadsClosure(outputStream* st) : _st(st) { }
  2315   virtual void do_thread(Thread *t) {
  2316     t->print_on(_st);
  2318 };
  2320 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  2321   if (ParallelGCThreads > 0) {
  2322     workers()->print_worker_threads();
  2324   st->print("\"G1 concurrent mark GC Thread\" ");
  2325   _cmThread->print();
  2326   st->cr();
  2327   st->print("\"G1 concurrent refinement GC Threads\" ");
  2328   PrintOnThreadsClosure p(st);
  2329   _cg1r->threads_do(&p);
  2330   st->cr();
  2331   st->print("\"G1 zero-fill GC Thread\" ");
  2332   _czft->print_on(st);
  2333   st->cr();
  2336 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  2337   if (ParallelGCThreads > 0) {
  2338     workers()->threads_do(tc);
  2340   tc->do_thread(_cmThread);
  2341   _cg1r->threads_do(tc);
  2342   tc->do_thread(_czft);
  2345 void G1CollectedHeap::print_tracing_info() const {
  2346   concurrent_g1_refine()->print_final_card_counts();
  2348   // We'll overload this to mean "trace GC pause statistics."
  2349   if (TraceGen0Time || TraceGen1Time) {
  2350     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  2351     // to that.
  2352     g1_policy()->print_tracing_info();
  2354   if (G1SummarizeRSetStats) {
  2355     g1_rem_set()->print_summary_info();
  2357   if (G1SummarizeConcurrentMark) {
  2358     concurrent_mark()->print_summary_info();
  2360   if (G1SummarizeZFStats) {
  2361     ConcurrentZFThread::print_summary_info();
  2363   g1_policy()->print_yg_surv_rate_info();
  2365   GCOverheadReporter::printGCOverhead();
  2367   SpecializationStats::print();
  2371 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  2372   HeapRegion* hr = heap_region_containing(addr);
  2373   if (hr == NULL) {
  2374     return 0;
  2375   } else {
  2376     return 1;
  2380 G1CollectedHeap* G1CollectedHeap::heap() {
  2381   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  2382          "not a garbage-first heap");
  2383   return _g1h;
  2386 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  2387   if (PrintHeapAtGC){
  2388     gclog_or_tty->print_cr(" {Heap before GC collections=%d:", total_collections());
  2389     Universe::print();
  2391   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  2392   // Call allocation profiler
  2393   AllocationProfiler::iterate_since_last_gc();
  2394   // Fill TLAB's and such
  2395   ensure_parsability(true);
  2398 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  2399   // FIXME: what is this about?
  2400   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  2401   // is set.
  2402   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  2403                         "derived pointer present"));
  2405   if (PrintHeapAtGC){
  2406     gclog_or_tty->print_cr(" Heap after GC collections=%d:", total_collections());
  2407     Universe::print();
  2408     gclog_or_tty->print("} ");
  2412 void G1CollectedHeap::do_collection_pause() {
  2413   // Read the GC count while holding the Heap_lock
  2414   // we need to do this _before_ wait_for_cleanup_complete(), to
  2415   // ensure that we do not give up the heap lock and potentially
  2416   // pick up the wrong count
  2417   int gc_count_before = SharedHeap::heap()->total_collections();
  2419   // Don't want to do a GC pause while cleanup is being completed!
  2420   wait_for_cleanup_complete();
  2422   g1_policy()->record_stop_world_start();
  2424     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
  2425     VM_G1IncCollectionPause op(gc_count_before);
  2426     VMThread::execute(&op);
  2430 void
  2431 G1CollectedHeap::doConcurrentMark() {
  2432   if (G1ConcMark) {
  2433     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2434     if (!_cmThread->in_progress()) {
  2435       _cmThread->set_started();
  2436       CGC_lock->notify();
  2441 class VerifyMarkedObjsClosure: public ObjectClosure {
  2442     G1CollectedHeap* _g1h;
  2443     public:
  2444     VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  2445     void do_object(oop obj) {
  2446       assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
  2447              "markandsweep mark should agree with concurrent deadness");
  2449 };
  2451 void
  2452 G1CollectedHeap::checkConcurrentMark() {
  2453     VerifyMarkedObjsClosure verifycl(this);
  2454     //    MutexLockerEx x(getMarkBitMapLock(),
  2455     //              Mutex::_no_safepoint_check_flag);
  2456     object_iterate(&verifycl, false);
  2459 void G1CollectedHeap::do_sync_mark() {
  2460   _cm->checkpointRootsInitial();
  2461   _cm->markFromRoots();
  2462   _cm->checkpointRootsFinal(false);
  2465 // <NEW PREDICTION>
  2467 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
  2468                                                        bool young) {
  2469   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
  2472 void G1CollectedHeap::check_if_region_is_too_expensive(double
  2473                                                            predicted_time_ms) {
  2474   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
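       // A back-of-the-envelope reading of the two methods below (illustrative
       // numbers only): with a buffer size of 256 cards, 3 completed buffers,
       // and two mutator threads whose local queues hold 10 and 20 cards,
       // pending_card_num() reports 256 * 3 + 10 + 20 = 798 cards, while
       // max_pending_card_num() assumes every completed and per-thread buffer
       // is full: (3 + 2) * 256 = 1280 cards.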
  2477 size_t G1CollectedHeap::pending_card_num() {
  2478   size_t extra_cards = 0;
  2479   JavaThread *curr = Threads::first();
  2480   while (curr != NULL) {
  2481     DirtyCardQueue& dcq = curr->dirty_card_queue();
  2482     extra_cards += dcq.size();
  2483     curr = curr->next();
  2485   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2486   size_t buffer_size = dcqs.buffer_size();
  2487   size_t buffer_num = dcqs.completed_buffers_num();
  2488   return buffer_size * buffer_num + extra_cards;
  2491 size_t G1CollectedHeap::max_pending_card_num() {
  2492   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2493   size_t buffer_size = dcqs.buffer_size();
  2494   size_t buffer_num  = dcqs.completed_buffers_num();
  2495   int thread_num  = Threads::number_of_threads();
  2496   return (buffer_num + thread_num) * buffer_size;
  2499 size_t G1CollectedHeap::cards_scanned() {
  2500   HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
  2501   return g1_rset->cardsScanned();
  2504 void
  2505 G1CollectedHeap::setup_surviving_young_words() {
  2506   guarantee( _surviving_young_words == NULL, "pre-condition" );
  2507   size_t array_length = g1_policy()->young_cset_length();
  2508   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  2509   if (_surviving_young_words == NULL) {
  2510     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  2511                           "Not enough space for young surv words summary.");
  2513   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  2514   for (size_t i = 0;  i < array_length; ++i) {
  2515     guarantee( _surviving_young_words[i] == 0, "invariant" );
  2519 void
  2520 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  2521   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  2522   size_t array_length = g1_policy()->young_cset_length();
  2523   for (size_t i = 0; i < array_length; ++i)
  2524     _surviving_young_words[i] += surv_young_words[i];
  2527 void
  2528 G1CollectedHeap::cleanup_surviving_young_words() {
  2529   guarantee( _surviving_young_words != NULL, "pre-condition" );
  2530   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
  2531   _surviving_young_words = NULL;
  2534 // </NEW PREDICTION>
  2536 void
  2537 G1CollectedHeap::do_collection_pause_at_safepoint() {
  2538   char verbose_str[128];
  2539   sprintf(verbose_str, "GC pause ");
  2540   if (g1_policy()->in_young_gc_mode()) {
  2541     if (g1_policy()->full_young_gcs())
  2542       strcat(verbose_str, "(young)");
  2543     else
  2544       strcat(verbose_str, "(partial)");
  2546   if (g1_policy()->should_initiate_conc_mark())
  2547     strcat(verbose_str, " (initial-mark)");
  2549   GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
  2551   // if PrintGCDetails is on, we'll print long statistics information
  2552   // in the collector policy code, so let's not print this as the output
  2553   // is messy if we do.
  2554   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  2555   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  2556   TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  2558   ResourceMark rm;
  2559   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  2560   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
  2561   guarantee(!is_gc_active(), "collection is not reentrant");
  2562   assert(regions_accounted_for(), "Region leakage!");
  2564   increment_gc_time_stamp();
  2566   if (g1_policy()->in_young_gc_mode()) {
  2567     assert(check_young_list_well_formed(),
  2568                 "young list should be well formed");
  2571   if (GC_locker::is_active()) {
  2572     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  2575   bool abandoned = false;
  2576   { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  2577     IsGCActiveMark x;
  2579     gc_prologue(false);
  2580     increment_total_collections();
  2582 #if G1_REM_SET_LOGGING
  2583     gclog_or_tty->print_cr("\nJust chose CS, heap:");
  2584     print();
  2585 #endif
  2587     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  2588       HandleMark hm;  // Discard invalid handles created during verification
  2589       prepare_for_verify();
  2590       gclog_or_tty->print(" VerifyBeforeGC:");
  2591       Universe::verify(false);
  2594     COMPILER2_PRESENT(DerivedPointerTable::clear());
  2596     // We want to turn off ref discovery, if necessary, and turn it back
  2597     // on again later if we do.
  2598     bool was_enabled = ref_processor()->discovery_enabled();
  2599     if (was_enabled) ref_processor()->disable_discovery();
  2601     // Forget the current alloc region (we might even choose it to be part
  2602     // of the collection set!).
  2603     abandon_cur_alloc_region();
  2605     // The elapsed time measured from the start time below deliberately
  2606     // excludes the time spent in the possible verification above.
  2607     double start_time_sec = os::elapsedTime();
  2608     GCOverheadReporter::recordSTWStart(start_time_sec);
  2609     size_t start_used_bytes = used();
  2610     if (!G1ConcMark) {
  2611       do_sync_mark();
  2614     g1_policy()->record_collection_pause_start(start_time_sec,
  2615                                                start_used_bytes);
  2617     guarantee(_in_cset_fast_test == NULL, "invariant");
  2618     guarantee(_in_cset_fast_test_base == NULL, "invariant");
  2619     _in_cset_fast_test_length = max_regions();
  2620     _in_cset_fast_test_base =
  2621                              NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  2622     memset(_in_cset_fast_test_base, false,
  2623                                      _in_cset_fast_test_length * sizeof(bool));
  2624     // We're biasing _in_cset_fast_test to avoid subtracting the
  2625     // beginning of the heap every time we want to index; basically
  2626     // it's the same with what we do with the card table.
  2627     _in_cset_fast_test = _in_cset_fast_test_base -
  2628               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
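       // With this bias in place, an address can be tested with
       //   _in_cset_fast_test[(size_t) addr >> HeapRegion::LogOfHRGrainBytes]
       // which denotes the same element as
       //   _in_cset_fast_test_base[region index of addr within the heap]
       // but avoids subtracting the heap start on every lookup.  (This is an
       // illustrative sketch of the indexing, not a new accessor.)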
  2630 #if SCAN_ONLY_VERBOSE
  2631     _young_list->print();
  2632 #endif // SCAN_ONLY_VERBOSE
  2634     if (g1_policy()->should_initiate_conc_mark()) {
  2635       concurrent_mark()->checkpointRootsInitialPre();
  2637     save_marks();
  2639     // We must do this before any possible evacuation that should propagate
  2640     // marks.
  2641     if (mark_in_progress()) {
  2642       double start_time_sec = os::elapsedTime();
  2644       _cm->drainAllSATBBuffers();
  2645       double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  2646       g1_policy()->record_satb_drain_time(finish_mark_ms);
  2649     // Record the number of elements currently on the mark stack, so we
  2650     // only iterate over these.  (Since evacuation may add to the mark
  2651     // stack, doing more exposes race conditions.)  If no mark is in
  2652     // progress, this will be zero.
  2653     _cm->set_oops_do_bound();
  2655     assert(regions_accounted_for(), "Region leakage.");
  2657     if (mark_in_progress())
  2658       concurrent_mark()->newCSet();
  2660     // Now choose the CS.
  2661     g1_policy()->choose_collection_set();
  2663     // We may abandon a pause if we find no region that will fit within
  2664     // the pause time allowed by the MMU.
  2665     bool abandoned = (g1_policy()->collection_set() == NULL);
  2667     // Nothing to do if we were unable to choose a collection set.
  2668     if (!abandoned) {
  2669 #if G1_REM_SET_LOGGING
  2670       gclog_or_tty->print_cr("\nAfter pause, heap:");
  2671       print();
  2672 #endif
  2674       setup_surviving_young_words();
  2676       // Set up the gc allocation regions.
  2677       get_gc_alloc_regions();
  2679       // Actually do the work...
  2680       evacuate_collection_set();
  2681       free_collection_set(g1_policy()->collection_set());
  2682       g1_policy()->clear_collection_set();
  2684       FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
  2685       // this is more for peace of mind; we're nulling them here and
  2686       // we're expecting them to be null at the beginning of the next GC
  2687       _in_cset_fast_test = NULL;
  2688       _in_cset_fast_test_base = NULL;
  2690       release_gc_alloc_regions(false /* totally */);
  2692       cleanup_surviving_young_words();
  2694       if (g1_policy()->in_young_gc_mode()) {
  2695         _young_list->reset_sampled_info();
  2696         assert(check_young_list_empty(true),
  2697                "young list should be empty");
  2699 #if SCAN_ONLY_VERBOSE
  2700         _young_list->print();
  2701 #endif // SCAN_ONLY_VERBOSE
  2703         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  2704                                              _young_list->first_survivor_region(),
  2705                                              _young_list->last_survivor_region());
  2706         _young_list->reset_auxilary_lists();
  2708     } else {
  2709       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  2712     if (evacuation_failed()) {
  2713       _summary_bytes_used = recalculate_used();
  2714     } else {
  2715       // The "used" bytes of the collection set regions have already been
  2716       // subtracted when the regions were freed.  Add in the bytes evacuated.
  2717       _summary_bytes_used += g1_policy()->bytes_in_to_space();
  2720     if (g1_policy()->in_young_gc_mode() &&
  2721         g1_policy()->should_initiate_conc_mark()) {
  2722       concurrent_mark()->checkpointRootsInitialPost();
  2723       set_marking_started();
  2724       doConcurrentMark();
  2727 #if SCAN_ONLY_VERBOSE
  2728     _young_list->print();
  2729 #endif // SCAN_ONLY_VERBOSE
  2731     double end_time_sec = os::elapsedTime();
  2732     double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  2733     g1_policy()->record_pause_time_ms(pause_time_ms);
  2734     GCOverheadReporter::recordSTWEnd(end_time_sec);
  2735     g1_policy()->record_collection_pause_end(abandoned);
  2737     assert(regions_accounted_for(), "Region leakage.");
  2739     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  2740       HandleMark hm;  // Discard invalid handles created during verification
  2741       gclog_or_tty->print(" VerifyAfterGC:");
  2742       prepare_for_verify();
  2743       Universe::verify(false);
  2746     if (was_enabled) ref_processor()->enable_discovery();
  2749       size_t expand_bytes = g1_policy()->expansion_amount();
  2750       if (expand_bytes > 0) {
  2751         size_t bytes_before = capacity();
  2752         expand(expand_bytes);
  2756     if (mark_in_progress()) {
  2757       concurrent_mark()->update_g1_committed();
  2760 #ifdef TRACESPINNING
  2761     ParallelTaskTerminator::print_termination_counts();
  2762 #endif
  2764     gc_epilogue(false);
  2767   assert(verify_region_lists(), "Bad region lists.");
  2769   if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  2770     gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  2771     print_tracing_info();
  2772     vm_exit(-1);
  2776 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  2777   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  2778   // make sure we don't call set_gc_alloc_region() multiple times on
  2779   // the same region
  2780   assert(r == NULL || !r->is_gc_alloc_region(),
  2781          "shouldn't already be a GC alloc region");
  2782   HeapWord* original_top = NULL;
  2783   if (r != NULL)
  2784     original_top = r->top();
  2786   // We will want to record the used space in r as being there before gc.
  2787   // Once we install it as a GC alloc region, it's eligible for allocation.
  2788   // So record it now and use it later.
  2789   size_t r_used = 0;
  2790   if (r != NULL) {
  2791     r_used = r->used();
  2793     if (ParallelGCThreads > 0) {
  2794       // need to take the lock to guard against two threads calling
  2795       // get_gc_alloc_region concurrently (very unlikely but...)
  2796       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  2797       r->save_marks();
  2800   HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  2801   _gc_alloc_regions[purpose] = r;
  2802   if (old_alloc_region != NULL) {
  2803     // Replace aliases too.
  2804     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2805       if (_gc_alloc_regions[ap] == old_alloc_region) {
  2806         _gc_alloc_regions[ap] = r;
  2810   if (r != NULL) {
  2811     push_gc_alloc_region(r);
  2812     if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
  2813       // We are using a region as a GC alloc region after it has been used
  2814       // as a mutator allocation region during the current marking cycle.
  2815       // The mutator-allocated objects are currently implicitly marked, but
  2816       // when we move hr->next_top_at_mark_start() forward at the end
  2817       // of the GC pause, they won't be.  We therefore mark all objects in
  2818       // the "gap".  We do this object-by-object, since marking densely
  2819       // does not currently work right with marking bitmap iteration.  This
  2820       // means we rely on TLAB filling at the start of pauses, and no
  2821       // "resuscitation" of filled TLAB's.  If we want to do this, we need
  2822       // to fix the marking bitmap iteration.
  2823       HeapWord* curhw = r->next_top_at_mark_start();
  2824       HeapWord* t = original_top;
  2826       while (curhw < t) {
  2827         oop cur = (oop)curhw;
  2828         // We'll assume parallel for generality.  This is rare code.
  2829         concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
  2830         curhw = curhw + cur->size();
  2832       assert(curhw == t, "Should have parsed correctly.");
  2834     if (G1PolicyVerbose > 1) {
  2835       gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
  2836                           "for survivors:", r->bottom(), original_top, r->end());
  2837       r->print();
  2839     g1_policy()->record_before_bytes(r_used);
  2843 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  2844   assert(Thread::current()->is_VM_thread() ||
  2845          par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
  2846   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  2847          "Precondition.");
  2848   hr->set_is_gc_alloc_region(true);
  2849   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  2850   _gc_alloc_region_list = hr;
  2853 #ifdef G1_DEBUG
  2854 class FindGCAllocRegion: public HeapRegionClosure {
  2855 public:
  2856   bool doHeapRegion(HeapRegion* r) {
  2857     if (r->is_gc_alloc_region()) {
  2858       gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
  2859                              r->hrs_index(), r->bottom());
  2861     return false;
  2863 };
  2864 #endif // G1_DEBUG
  2866 void G1CollectedHeap::forget_alloc_region_list() {
  2867   assert(Thread::current()->is_VM_thread(), "Precondition");
  2868   while (_gc_alloc_region_list != NULL) {
  2869     HeapRegion* r = _gc_alloc_region_list;
  2870     assert(r->is_gc_alloc_region(), "Invariant.");
  2871     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  2872     // newly allocated data in order to be able to apply deferred updates
  2873     // before the GC is done for verification purposes (i.e. to allow
  2874     // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
  2875     // collection.
  2876     r->ContiguousSpace::set_saved_mark();
  2877     _gc_alloc_region_list = r->next_gc_alloc_region();
  2878     r->set_next_gc_alloc_region(NULL);
  2879     r->set_is_gc_alloc_region(false);
  2880     if (r->is_survivor()) {
  2881       if (r->is_empty()) {
  2882         r->set_not_young();
  2883       } else {
  2884         _young_list->add_survivor_region(r);
  2887     if (r->is_empty()) {
  2888       ++_free_regions;
  2891 #ifdef G1_DEBUG
  2892   FindGCAllocRegion fa;
  2893   heap_region_iterate(&fa);
  2894 #endif // G1_DEBUG
  2898 bool G1CollectedHeap::check_gc_alloc_regions() {
  2899   // TODO: allocation regions check
  2900   return true;
  2903 void G1CollectedHeap::get_gc_alloc_regions() {
  2904   // First, let's check that the GC alloc region list is empty (it should be)
  2905   assert(_gc_alloc_region_list == NULL, "invariant");
  2907   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2908     assert(_gc_alloc_regions[ap] == NULL, "invariant");
  2910     // Create new GC alloc regions.
  2911     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
  2912     _retained_gc_alloc_regions[ap] = NULL;
  2914     if (alloc_region != NULL) {
  2915       assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
  2917       // let's make sure that the GC alloc region is not tagged as such
  2918       // outside a GC operation
  2919       assert(!alloc_region->is_gc_alloc_region(), "sanity");
  2921       if (alloc_region->in_collection_set() ||
  2922           alloc_region->top() == alloc_region->end() ||
  2923           alloc_region->top() == alloc_region->bottom()) {
  2924         // we will discard the current GC alloc region if it's in the
  2925         // collection set (it can happen!), if it's already full (no
  2926         // point in using it), or if it's empty (this means that it
  2927         // was emptied during a cleanup and it should be on the free
  2928         // list now).
  2930         alloc_region = NULL;
  2934     if (alloc_region == NULL) {
  2935       // we will get a new GC alloc region
  2936       alloc_region = newAllocRegionWithExpansion(ap, 0);
  2939     if (alloc_region != NULL) {
  2940       assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
  2941       set_gc_alloc_region(ap, alloc_region);
  2944     assert(_gc_alloc_regions[ap] == NULL ||
  2945            _gc_alloc_regions[ap]->is_gc_alloc_region(),
  2946            "the GC alloc region should be tagged as such");
  2947     assert(_gc_alloc_regions[ap] == NULL ||
  2948            _gc_alloc_regions[ap] == _gc_alloc_region_list,
  2949            "the GC alloc region should be the same as the GC alloc list head");
  2951   // Set alternative regions for allocation purposes that have reached
  2952   // their limit.
  2953   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2954     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
  2955     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
  2956       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
  2959   assert(check_gc_alloc_regions(), "alloc regions messed up");
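
// The retain-or-discard test above reduces to a small predicate: a retained
// GC alloc region is dropped if it ended up in the collection set, is
// completely full, or is completely empty.  The sketch below restates that
// test with raw addresses instead of HeapRegion; the names are illustrative
// stand-ins and this helper is not used by the implementation.
static bool sketch_discard_retained_region(bool  in_collection_set,
                                           char* bottom,
                                           char* top,
                                           char* end) {
  const bool is_full  = (top == end);     // no point in reusing it
  const bool is_empty = (top == bottom);  // emptied during cleanup already
  return in_collection_set || is_full || is_empty;
}
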
  2962 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  2963   // We keep a separate list of all regions that have been alloc regions in
  2964   // the current collection pause. Forget that now. This method will
  2965   // untag the GC alloc regions and tear down the GC alloc region
  2966   // list. It's desirable that no regions are tagged as GC alloc
  2967   // outside GCs.
  2968   forget_alloc_region_list();
  2970   // The current alloc regions contain objs that have survived
  2971   // collection. Make them no longer GC alloc regions.
  2972   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2973     HeapRegion* r = _gc_alloc_regions[ap];
  2974     _retained_gc_alloc_regions[ap] = NULL;
  2976     if (r != NULL) {
  2977       // we retain nothing on _gc_alloc_regions between GCs
  2978       set_gc_alloc_region(ap, NULL);
  2979       _gc_alloc_region_counts[ap] = 0;
  2981       if (r->is_empty()) {
  2982         // we didn't actually allocate anything in it; let's just put
  2983         // it on the free list
  2984         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  2985         r->set_zero_fill_complete();
  2986         put_free_region_on_list_locked(r);
  2987       } else if (_retain_gc_alloc_region[ap] && !totally) {
  2988         // retain it so that we can use it at the beginning of the next GC
  2989         _retained_gc_alloc_regions[ap] = r;
  2995 #ifndef PRODUCT
  2996 // Useful for debugging
  2998 void G1CollectedHeap::print_gc_alloc_regions() {
  2999   gclog_or_tty->print_cr("GC alloc regions");
  3000   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3001     HeapRegion* r = _gc_alloc_regions[ap];
  3002     if (r == NULL) {
  3003       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
  3004     } else {
  3005       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
  3006                              ap, r->bottom(), r->used());
  3010 #endif // PRODUCT
  3012 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  3013   _drain_in_progress = false;
  3014   set_evac_failure_closure(cl);
  3015   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3018 void G1CollectedHeap::finalize_for_evac_failure() {
  3019   assert(_evac_failure_scan_stack != NULL &&
  3020          _evac_failure_scan_stack->length() == 0,
  3021          "Postcondition");
  3022   assert(!_drain_in_progress, "Postcondition");
  3023   // Don't have to delete, since the scan stack is a resource object.
  3024   _evac_failure_scan_stack = NULL;
  3029 // *** Sequential G1 Evacuation
  3031 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
  3032   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  3033   // let the caller handle alloc failure
  3034   if (alloc_region == NULL) return NULL;
  3035   assert(isHumongous(word_size) || !alloc_region->isHumongous(),
  3036          "Either the object is humongous or the region isn't");
  3037   HeapWord* block = alloc_region->allocate(word_size);
  3038   if (block == NULL) {
  3039     block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
  3041   return block;
  3044 class G1IsAliveClosure: public BoolObjectClosure {
  3045   G1CollectedHeap* _g1;
  3046 public:
  3047   G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3048   void do_object(oop p) { assert(false, "Do not call."); }
  3049   bool do_object_b(oop p) {
  3050     // It is reachable if it is outside the collection set, or is inside
  3051     // and forwarded.
  3053 #ifdef G1_DEBUG
  3054     gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
  3055                            (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
  3056                            !_g1->obj_in_cs(p) || p->is_forwarded());
  3057 #endif // G1_DEBUG
  3059     return !_g1->obj_in_cs(p) || p->is_forwarded();
  3061 };
  3063 class G1KeepAliveClosure: public OopClosure {
  3064   G1CollectedHeap* _g1;
  3065 public:
  3066   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3067   void do_oop(narrowOop* p) {
  3068     guarantee(false, "NYI");
  3070   void do_oop(oop* p) {
  3071     oop obj = *p;
  3072 #ifdef G1_DEBUG
  3073     if (PrintGC && Verbose) {
  3074       gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
  3075                              p, (void*) obj, (void*) *p);
  3077 #endif // G1_DEBUG
  3079     if (_g1->obj_in_cs(obj)) {
  3080       assert( obj->is_forwarded(), "invariant" );
  3081       *p = obj->forwardee();
  3083 #ifdef G1_DEBUG
  3084       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
  3085                              (void*) obj, (void*) *p);
  3086 #endif // G1_DEBUG
  3089 };
  3091 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
  3092 private:
  3093   G1CollectedHeap* _g1;
  3094   G1RemSet* _g1_rem_set;
  3095 public:
  3096   UpdateRSetImmediate(G1CollectedHeap* g1) :
  3097     _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
  3099   void do_oop(narrowOop* p) {
  3100     guarantee(false, "NYI");
  3102   void do_oop(oop* p) {
  3103     assert(_from->is_in_reserved(p), "paranoia");
  3104     if (*p != NULL && !_from->is_survivor()) {
  3105       _g1_rem_set->par_write_ref(_from, p, 0);
  3108 };
  3110 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  3111 private:
  3112   G1CollectedHeap* _g1;
  3113   DirtyCardQueue *_dcq;
  3114   CardTableModRefBS* _ct_bs;
  3116 public:
  3117   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
  3118     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
  3120   void do_oop(narrowOop* p) {
  3121     guarantee(false, "NYI");
  3123   void do_oop(oop* p) {
  3124     assert(_from->is_in_reserved(p), "paranoia");
  3125     if (!_from->is_in_reserved(*p) && !_from->is_survivor()) {
  3126       size_t card_index = _ct_bs->index_for(p);
  3127       if (_ct_bs->mark_card_deferred(card_index)) {
  3128         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
  3132 };
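
// UpdateRSetDeferred above boils down to: compute the card covering the
// updated field, mark that card as deferred exactly once, and enqueue it for
// later refinement.  The self-contained sketch below restates that flow with
// toy types; the 9-bit shift assumes the usual 512-byte card size, the names
// are hypothetical, and the real mark_card_deferred() is atomic.
#include <stddef.h>
#include <stdint.h>

struct SketchCardTable {
  unsigned char* _bytes;      // one byte per card
  uintptr_t      _heap_base;  // start of the covered heap
  enum { sketch_card_shift = 9 };

  size_t index_for(const void* field) const {
    return ((uintptr_t)field - _heap_base) >> sketch_card_shift;
  }
  // Returns true only for the first caller that defers this card.
  bool mark_card_deferred(size_t index) {
    if (_bytes[index] != 0) return false;   // already deferred
    _bytes[index] = 1;
    return true;
  }
};

struct SketchDirtyCardQueue {
  size_t* _buf;
  size_t  _len;
  void enqueue(size_t card_index) { _buf[_len++] = card_index; }
};

static void sketch_deferred_rs_update(SketchCardTable&      ct,
                                      SketchDirtyCardQueue& dcq,
                                      const void*           field) {
  size_t index = ct.index_for(field);
  if (ct.mark_card_deferred(index)) {
    dcq.enqueue(index);  // the real code enqueues the card's byte address
  }
}
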
  3136 class RemoveSelfPointerClosure: public ObjectClosure {
  3137 private:
  3138   G1CollectedHeap* _g1;
  3139   ConcurrentMark* _cm;
  3140   HeapRegion* _hr;
  3141   size_t _prev_marked_bytes;
  3142   size_t _next_marked_bytes;
  3143   OopsInHeapRegionClosure *_cl;
  3144 public:
  3145   RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
  3146     _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
  3147     _next_marked_bytes(0), _cl(cl) {}
  3149   size_t prev_marked_bytes() { return _prev_marked_bytes; }
  3150   size_t next_marked_bytes() { return _next_marked_bytes; }
  3152   // The original idea here was to coalesce evacuated and dead objects.
  3153   // However, that caused complications with the block offset table (BOT).
  3154   // In particular, suppose there were two TLABs, one of them partially refined:
  3155   // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  3156   // The BOT entries of the unrefined part of TLAB_2 point to the start
  3157   // of TLAB_2. If the last object of the TLAB_1 and the first object
  3158   // of TLAB_2 are coalesced, then the cards of the unrefined part
  3159   // would point into the middle of the filler object.
  3160   //
  3161   // The current approach is to not coalesce and leave the BOT contents intact.
  3162   void do_object(oop obj) {
  3163     if (obj->is_forwarded() && obj->forwardee() == obj) {
  3164       // The object failed to move.
  3165       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
  3166       _cm->markPrev(obj);
  3167       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3168       _prev_marked_bytes += (obj->size() * HeapWordSize);
  3169       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
  3170         _cm->markAndGrayObjectIfNecessary(obj);
  3172       obj->set_mark(markOopDesc::prototype());
  3173       // While we were processing RSet buffers during the
  3174       // collection, we actually didn't scan any cards in the
  3175       // collection set, since we didn't want to update remembered
  3176       // sets with entries that point into the collection set, given
  3177       // that live objects from the collection set are about to move
  3178       // and such entries will be stale very soon. This change also
  3179       // dealt with a reliability issue which involved scanning a
  3180       // card in the collection set and coming across an array that
  3181       // was being chunked and looked malformed. The problem is
  3182       // that, if evacuation fails, we might have remembered set
  3183       // entries missing, given that we skipped cards in the
  3184       // collection set. So, we'll recreate such entries now.
  3185       obj->oop_iterate(_cl);
  3186       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3187     } else {
  3188       // The object has been either evacuated or is dead. Fill it with a
  3189       // dummy object.
  3190       MemRegion mr((HeapWord*)obj, obj->size());
  3191       CollectedHeap::fill_with_object(mr);
  3192       _cm->clearRangeBothMaps(mr);
  3195 };
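
// The closure above separates the two cases with a single test: an object
// whose forwarding pointer refers back to itself failed to evacuate and stays
// live in place; everything else in a failed region is either already copied
// out or dead, and is overwritten with a filler object so the region remains
// parseable.  A sketch of that test with a toy header (illustrative only):
#include <stddef.h>

struct SketchHeader {
  SketchHeader* _forwardee;                         // NULL if not forwarded
  bool is_forwarded() const       { return _forwardee != NULL; }
  SketchHeader* forwardee() const { return _forwardee; }
};

static bool sketch_failed_to_move(const SketchHeader* obj) {
  // Self-forwarded means evacuation failed for this particular object.
  return obj->is_forwarded() && obj->forwardee() == obj;
}
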
  3197 void G1CollectedHeap::remove_self_forwarding_pointers() {
  3198   UpdateRSetImmediate immediate_update(_g1h);
  3199   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  3200   UpdateRSetDeferred deferred_update(_g1h, &dcq);
  3201   OopsInHeapRegionClosure *cl;
  3202   if (G1DeferredRSUpdate) {
  3203     cl = &deferred_update;
  3204   } else {
  3205     cl = &immediate_update;
  3207   HeapRegion* cur = g1_policy()->collection_set();
  3208   while (cur != NULL) {
  3209     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3211     RemoveSelfPointerClosure rspc(_g1h, cl);
  3212     if (cur->evacuation_failed()) {
  3213       assert(cur->in_collection_set(), "bad CS");
  3214       cl->set_region(cur);
  3215       cur->object_iterate(&rspc);
  3217       // A number of manipulations to make the TAMS be the current top,
  3218       // and the marked bytes be the ones observed in the iteration.
  3219       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
  3220         // The comments below are the postconditions achieved by the
  3221         // calls.  Note especially the last such condition, which says that
  3222         // the count of marked bytes has been properly restored.
  3223         cur->note_start_of_marking(false);
  3224         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3225         cur->add_to_marked_bytes(rspc.prev_marked_bytes());
  3226         // _next_marked_bytes == prev_marked_bytes.
  3227         cur->note_end_of_marking();
  3228         // _prev_top_at_mark_start == top(),
  3229         // _prev_marked_bytes == prev_marked_bytes
  3231       // If there is no mark in progress, we modified the _next variables
  3232       // above needlessly, but harmlessly.
  3233       if (_g1h->mark_in_progress()) {
  3234         cur->note_start_of_marking(false);
  3235         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3236         // _next_marked_bytes == next_marked_bytes.
  3239       // Now make sure the region has the right index in the sorted array.
  3240       g1_policy()->note_change_in_marked_bytes(cur);
  3242     cur = cur->next_in_collection_set();
  3244   assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3246   // Now restore saved marks, if any.
  3247   if (_objs_with_preserved_marks != NULL) {
  3248     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  3249     assert(_objs_with_preserved_marks->length() ==
  3250            _preserved_marks_of_objs->length(), "Both or none.");
  3251     guarantee(_objs_with_preserved_marks->length() ==
  3252               _preserved_marks_of_objs->length(), "Both or none.");
  3253     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  3254       oop obj   = _objs_with_preserved_marks->at(i);
  3255       markOop m = _preserved_marks_of_objs->at(i);
  3256       obj->set_mark(m);
  3258     // Delete the preserved marks growable arrays (allocated on the C heap).
  3259     delete _objs_with_preserved_marks;
  3260     delete _preserved_marks_of_objs;
  3261     _objs_with_preserved_marks = NULL;
  3262     _preserved_marks_of_objs = NULL;
  3266 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  3267   _evac_failure_scan_stack->push(obj);
  3270 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  3271   assert(_evac_failure_scan_stack != NULL, "precondition");
  3273   while (_evac_failure_scan_stack->length() > 0) {
  3274      oop obj = _evac_failure_scan_stack->pop();
  3275      _evac_failure_closure->set_region(heap_region_containing(obj));
  3276      obj->oop_iterate_backwards(_evac_failure_closure);
  3280 void G1CollectedHeap::handle_evacuation_failure(oop old) {
  3281   markOop m = old->mark();
  3282   // forward to self
  3283   assert(!old->is_forwarded(), "precondition");
  3285   old->forward_to(old);
  3286   handle_evacuation_failure_common(old, m);
  3289 oop
  3290 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  3291                                                oop old) {
  3292   markOop m = old->mark();
  3293   oop forward_ptr = old->forward_to_atomic(old);
  3294   if (forward_ptr == NULL) {
  3295     // Forward-to-self succeeded.
  3296     if (_evac_failure_closure != cl) {
  3297       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  3298       assert(!_drain_in_progress,
  3299              "Should only be true while someone holds the lock.");
  3300       // Set the global evac-failure closure to the current thread's.
  3301       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  3302       set_evac_failure_closure(cl);
  3303       // Now do the common part.
  3304       handle_evacuation_failure_common(old, m);
  3305       // Reset to NULL.
  3306       set_evac_failure_closure(NULL);
  3307     } else {
  3308       // The lock is already held, and this is recursive.
  3309       assert(_drain_in_progress, "This should only be the recursive case.");
  3310       handle_evacuation_failure_common(old, m);
  3312     return old;
  3313   } else {
  3314     // Someone else had a place to copy it.
  3315     return forward_ptr;
  3319 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  3320   set_evacuation_failed(true);
  3322   preserve_mark_if_necessary(old, m);
  3324   HeapRegion* r = heap_region_containing(old);
  3325   if (!r->evacuation_failed()) {
  3326     r->set_evacuation_failed(true);
  3327     if (G1PrintRegions) {
  3328       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
  3329                           "["PTR_FORMAT","PTR_FORMAT")\n",
  3330                           r, r->bottom(), r->end());
  3334   push_on_evac_failure_scan_stack(old);
  3336   if (!_drain_in_progress) {
  3337     // prevent recursion in copy_to_survivor_space()
  3338     _drain_in_progress = true;
  3339     drain_evac_failure_scan_stack();
  3340     _drain_in_progress = false;
  3344 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  3345   if (m != markOopDesc::prototype()) {
  3346     if (_objs_with_preserved_marks == NULL) {
  3347       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  3348       _objs_with_preserved_marks =
  3349         new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3350       _preserved_marks_of_objs =
  3351         new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
  3353     _objs_with_preserved_marks->push(obj);
  3354     _preserved_marks_of_objs->push(m);
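
// preserve_mark_if_necessary() and the restore loop in
// remove_self_forwarding_pointers() form a save/restore pair: only
// non-prototype mark words are recorded, in two parallel arrays, and they are
// written back in the same order once evacuation-failure handling is done.
// The sketch below captures that pattern with toy types; the names and the
// plain intptr_t mark are illustrative, not the real markOop machinery.
#include <stddef.h>
#include <stdint.h>

struct SketchMarkSaver {
  void**    _objs;            // parallel arrays, grown together
  intptr_t* _marks;
  size_t    _len;
  intptr_t  _prototype_mark;  // the default header value; never saved

  void preserve_if_necessary(void* obj, intptr_t mark) {
    if (mark != _prototype_mark) {
      _objs[_len]  = obj;
      _marks[_len] = mark;
      _len++;
    }
  }

  void restore_all(void (*set_mark)(void* obj, intptr_t mark)) {
    for (size_t i = 0; i < _len; i++) {
      set_mark(_objs[i], _marks[i]);   // put the original header back
    }
    _len = 0;
  }
};
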
  3358 // *** Parallel G1 Evacuation
  3360 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  3361                                                   size_t word_size) {
  3362   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  3363   // let the caller handle alloc failure
  3364   if (alloc_region == NULL) return NULL;
  3366   HeapWord* block = alloc_region->par_allocate(word_size);
  3367   if (block == NULL) {
  3368     MutexLockerEx x(par_alloc_during_gc_lock(),
  3369                     Mutex::_no_safepoint_check_flag);
  3370     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  3372   return block;
  3375 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  3376                                             bool par) {
  3377   // Another thread might have obtained alloc_region for the given
  3378   // purpose, and might be attempting to allocate in it, and might
  3379   // succeed.  Therefore, we can't do the "finalization" stuff on the
  3380   // region below until we're sure the last allocation has happened.
  3381   // We ensure this by allocating the remaining space with a garbage
  3382   // object.
  3383   if (par) par_allocate_remaining_space(alloc_region);
  3384   // Now we can do the post-GC stuff on the region.
  3385   alloc_region->note_end_of_copying();
  3386   g1_policy()->record_after_bytes(alloc_region->used());
  3389 HeapWord*
  3390 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  3391                                          HeapRegion*    alloc_region,
  3392                                          bool           par,
  3393                                          size_t         word_size) {
  3394   HeapWord* block = NULL;
  3395   // In the parallel case, a thread that obtained the lock before us may
  3396   // have already assigned a new gc_alloc_region.
  3397   if (alloc_region != _gc_alloc_regions[purpose]) {
  3398     assert(par, "But should only happen in parallel case.");
  3399     alloc_region = _gc_alloc_regions[purpose];
  3400     if (alloc_region == NULL) return NULL;
  3401     block = alloc_region->par_allocate(word_size);
  3402     if (block != NULL) return block;
  3403     // Otherwise, continue; this new region is empty, too.
  3405   assert(alloc_region != NULL, "We better have an allocation region");
  3406   retire_alloc_region(alloc_region, par);
  3408   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  3409     // Cannot allocate more regions for the given purpose.
  3410     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
  3411     // Is there an alternative?
  3412     if (purpose != alt_purpose) {
  3413       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
  3414       // Use it only if it exists and has not already been aliased.
  3415       if (alloc_region != alt_region && alt_region != NULL) {
  3416         // Try to allocate in the alternative region.
  3417         if (par) {
  3418           block = alt_region->par_allocate(word_size);
  3419         } else {
  3420           block = alt_region->allocate(word_size);
  3422         // Make an alias.
  3423         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  3424         if (block != NULL) {
  3425           return block;
  3427         retire_alloc_region(alt_region, par);
  3429       // Both the allocation region and the alternative one are full
  3430       // and aliased; replace them with a new allocation region.
  3431       purpose = alt_purpose;
  3432     } else {
  3433       set_gc_alloc_region(purpose, NULL);
  3434       return NULL;
  3438   // Now allocate a new region for allocation.
  3439   alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
  3441   // let the caller handle alloc failure
  3442   if (alloc_region != NULL) {
  3444     assert(check_gc_alloc_regions(), "alloc regions messed up");
  3445     assert(alloc_region->saved_mark_at_top(),
  3446            "Mark should have been saved already.");
  3447     // We used to assert that the region was zero-filled here, but no
  3448     // longer.
  3450     // This must be done last: once it's installed, other regions may
  3451     // allocate in it (without holding the lock.)
  3452     set_gc_alloc_region(purpose, alloc_region);
  3454     if (par) {
  3455       block = alloc_region->par_allocate(word_size);
  3456     } else {
  3457       block = alloc_region->allocate(word_size);
  3459     // Caller handles alloc failure.
  3460   } else {
  3461     // This also sets any other allocation purposes aliased to the same old alloc region to NULL.
  3462     set_gc_alloc_region(purpose, NULL);
  3464   return block;  // May be NULL.
  3467 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
  3468   HeapWord* block = NULL;
  3469   size_t free_words;
  3470   do {
  3471     free_words = r->free()/HeapWordSize;
  3472     // If there's too little space, no one can allocate, so we're done.
  3473     if (free_words < (size_t)oopDesc::header_size()) return;
  3474     // Otherwise, try to claim it.
  3475     block = r->par_allocate(free_words);
  3476   } while (block == NULL);
  3477   fill_with_object(block, free_words);
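
// par_allocate_remaining_space() must co-exist with threads that may still be
// allocating in the region: it re-reads the free space and retries the
// (atomic) allocation until it claims the whole remainder, then plugs it with
// a filler object.  The sketch below keeps the shape of that retry loop with
// a toy region; the atomic compare-and-swap that makes the real par_allocate()
// safe under contention is deliberately elided.
#include <stddef.h>

struct SketchRegion {
  char* _top;
  char* _end;
  size_t free_bytes() const { return (size_t)(_end - _top); }
  // The real par_allocate() CASes _top forward and returns NULL if it loses
  // the race; this toy version can only fail if asked for too much.
  char* par_allocate(size_t bytes) {
    if (bytes > free_bytes()) return NULL;
    char* old_top = _top;
    _top += bytes;
    return old_top;
  }
};

static void sketch_fill_remaining(SketchRegion* r, size_t min_fill_bytes) {
  char* block;
  do {
    size_t free_b = r->free_bytes();
    if (free_b < min_fill_bytes) return;  // too small for anyone to use
    block = r->par_allocate(free_b);      // may fail if another thread races
  } while (block == NULL);                // lost the race: re-read and retry
  // The real code now calls fill_with_object() on the claimed block.
}
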
  3480 #define use_local_bitmaps         1
  3481 #define verify_local_bitmaps      0
  3483 #ifndef PRODUCT
  3485 class GCLabBitMap;
  3486 class GCLabBitMapClosure: public BitMapClosure {
  3487 private:
  3488   ConcurrentMark* _cm;
  3489   GCLabBitMap*    _bitmap;
  3491 public:
  3492   GCLabBitMapClosure(ConcurrentMark* cm,
  3493                      GCLabBitMap* bitmap) {
  3494     _cm     = cm;
  3495     _bitmap = bitmap;
  3498   virtual bool do_bit(size_t offset);
  3499 };
  3501 #endif // PRODUCT
  3503 #define oop_buffer_length 256
  3505 class GCLabBitMap: public BitMap {
  3506 private:
  3507   ConcurrentMark* _cm;
  3509   int       _shifter;
  3510   size_t    _bitmap_word_covers_words;
  3512   // beginning of the heap
  3513   HeapWord* _heap_start;
  3515   // this is the actual start of the GCLab
  3516   HeapWord* _real_start_word;
  3518   // this is the actual end of the GCLab
  3519   HeapWord* _real_end_word;
  3521   // this is the first word, possibly located before the actual start
  3522   // of the GCLab, that corresponds to the first bit of the bitmap
  3523   HeapWord* _start_word;
  3525   // size of a GCLab in words
  3526   size_t _gclab_word_size;
  3528   static int shifter() {
  3529     return MinObjAlignment - 1;
  3532   // how many heap words does a single bitmap word correspond to?
  3533   static size_t bitmap_word_covers_words() {
  3534     return BitsPerWord << shifter();
  3537   static size_t gclab_word_size() {
  3538     return G1ParallelGCAllocBufferSize / HeapWordSize;
  3541   static size_t bitmap_size_in_bits() {
  3542     size_t bits_in_bitmap = gclab_word_size() >> shifter();
  3543     // We are going to ensure that the beginning of a word in this
  3544     // bitmap also corresponds to the beginning of a word in the
  3545     // global marking bitmap. To handle the case where a GCLab
  3546     // starts from the middle of the bitmap, we need to add enough
  3547     // space (i.e. up to a bitmap word) to ensure that we have
  3548     // enough bits in the bitmap.
  3549     return bits_in_bitmap + BitsPerWord - 1;
  3551 public:
  3552   GCLabBitMap(HeapWord* heap_start)
  3553     : BitMap(bitmap_size_in_bits()),
  3554       _cm(G1CollectedHeap::heap()->concurrent_mark()),
  3555       _shifter(shifter()),
  3556       _bitmap_word_covers_words(bitmap_word_covers_words()),
  3557       _heap_start(heap_start),
  3558       _gclab_word_size(gclab_word_size()),
  3559       _real_start_word(NULL),
  3560       _real_end_word(NULL),
  3561       _start_word(NULL)
  3563     guarantee( size_in_words() >= bitmap_size_in_words(),
  3564                "just making sure");
  3567   inline unsigned heapWordToOffset(HeapWord* addr) {
  3568     unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
  3569     assert(offset < size(), "offset should be within bounds");
  3570     return offset;
  3573   inline HeapWord* offsetToHeapWord(size_t offset) {
  3574     HeapWord* addr =  _start_word + (offset << _shifter);
  3575     assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
  3576     return addr;
  3579   bool fields_well_formed() {
  3580     bool ret1 = (_real_start_word == NULL) &&
  3581                 (_real_end_word == NULL) &&
  3582                 (_start_word == NULL);
  3583     if (ret1)
  3584       return true;
  3586     bool ret2 = _real_start_word >= _start_word &&
  3587       _start_word < _real_end_word &&
  3588       (_real_start_word + _gclab_word_size) == _real_end_word &&
  3589       (_start_word + _gclab_word_size + _bitmap_word_covers_words)
  3590                                                               > _real_end_word;
  3591     return ret2;
  3594   inline bool mark(HeapWord* addr) {
  3595     guarantee(use_local_bitmaps, "invariant");
  3596     assert(fields_well_formed(), "invariant");
  3598     if (addr >= _real_start_word && addr < _real_end_word) {
  3599       assert(!isMarked(addr), "should not have already been marked");
  3601       // first mark it on the bitmap
  3602       at_put(heapWordToOffset(addr), true);
  3604       return true;
  3605     } else {
  3606       return false;
  3610   inline bool isMarked(HeapWord* addr) {
  3611     guarantee(use_local_bitmaps, "invariant");
  3612     assert(fields_well_formed(), "invariant");
  3614     return at(heapWordToOffset(addr));
  3617   void set_buffer(HeapWord* start) {
  3618     guarantee(use_local_bitmaps, "invariant");
  3619     clear();
  3621     assert(start != NULL, "invariant");
  3622     _real_start_word = start;
  3623     _real_end_word   = start + _gclab_word_size;
  3625     size_t diff =
  3626       pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
  3627     _start_word = start - diff;
  3629     assert(fields_well_formed(), "invariant");
  3632 #ifndef PRODUCT
  3633   void verify() {
  3634     // verify that the marks have been propagated
  3635     GCLabBitMapClosure cl(_cm, this);
  3636     iterate(&cl);
  3638 #endif // PRODUCT
  3640   void retire() {
  3641     guarantee(use_local_bitmaps, "invariant");
  3642     assert(fields_well_formed(), "invariant");
  3644     if (_start_word != NULL) {
  3645       CMBitMap*       mark_bitmap = _cm->nextMarkBitMap();
  3647       // this means that the bitmap was set up for the GCLab
  3648       assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
  3650       mark_bitmap->mostly_disjoint_range_union(this,
  3651                                 0, // always start from the start of the bitmap
  3652                                 _start_word,
  3653                                 size_in_words());
  3654       _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
  3656 #ifndef PRODUCT
  3657       if (use_local_bitmaps && verify_local_bitmaps)
  3658         verify();
  3659 #endif // PRODUCT
  3660     } else {
  3661       assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
  3665   static size_t bitmap_size_in_words() {
  3666     return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
  3668 };
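
// The sizing arithmetic in GCLabBitMap reads as plain integer math: one bit
// covers 2^shifter heap words, so a GCLab of N words needs N >> shifter bits,
// plus up to (BitsPerWord - 1) spare bits so the local bitmap can start on
// the same word boundary as the global mark bitmap even when the GCLab does
// not.  The helpers below restate that computation; parameter values are
// supplied by the caller rather than taken from any particular VM.
#include <stddef.h>

static size_t sketch_gclab_bitmap_bits(size_t gclab_words,
                                       size_t shifter,
                                       size_t bits_per_word) {
  size_t bits = gclab_words >> shifter;   // one bit per 2^shifter heap words
  return bits + bits_per_word - 1;        // slack for word alignment
}

static size_t sketch_gclab_bitmap_words(size_t bits, size_t bits_per_word) {
  return (bits + bits_per_word - 1) / bits_per_word;  // round up
}
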
  3670 #ifndef PRODUCT
  3672 bool GCLabBitMapClosure::do_bit(size_t offset) {
  3673   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
  3674   guarantee(_cm->isMarked(oop(addr)), "it should be!");
  3675   return true;
  3678 #endif // PRODUCT
  3680 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
  3681 private:
  3682   bool        _retired;
  3683   bool        _during_marking;
  3684   GCLabBitMap _bitmap;
  3686 public:
  3687   G1ParGCAllocBuffer() :
  3688     ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
  3689     _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
  3690     _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
  3691     _retired(false)
  3692   { }
  3694   inline bool mark(HeapWord* addr) {
  3695     guarantee(use_local_bitmaps, "invariant");
  3696     assert(_during_marking, "invariant");
  3697     return _bitmap.mark(addr);
  3700   inline void set_buf(HeapWord* buf) {
  3701     if (use_local_bitmaps && _during_marking)
  3702       _bitmap.set_buffer(buf);
  3703     ParGCAllocBuffer::set_buf(buf);
  3704     _retired = false;
  3707   inline void retire(bool end_of_gc, bool retain) {
  3708     if (_retired)
  3709       return;
  3710     if (use_local_bitmaps && _during_marking) {
  3711       _bitmap.retire();
  3713     ParGCAllocBuffer::retire(end_of_gc, retain);
  3714     _retired = true;
  3716 };
  3719 class G1ParScanThreadState : public StackObj {
  3720 protected:
  3721   G1CollectedHeap* _g1h;
  3722   RefToScanQueue*  _refs;
  3723   DirtyCardQueue   _dcq;
  3724   CardTableModRefBS* _ct_bs;
  3725   G1RemSet* _g1_rem;
  3727   typedef GrowableArray<oop*> OverflowQueue;
  3728   OverflowQueue* _overflowed_refs;
  3730   G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
  3731   ageTable           _age_table;
  3733   size_t           _alloc_buffer_waste;
  3734   size_t           _undo_waste;
  3736   OopsInHeapRegionClosure*      _evac_failure_cl;
  3737   G1ParScanHeapEvacClosure*     _evac_cl;
  3738   G1ParScanPartialArrayClosure* _partial_scan_cl;
  3740   int _hash_seed;
  3741   int _queue_num;
  3743   int _term_attempts;
  3744 #if G1_DETAILED_STATS
  3745   int _pushes, _pops, _steals, _steal_attempts;
  3746   int _overflow_pushes;
  3747 #endif
  3749   double _start;
  3750   double _start_strong_roots;
  3751   double _strong_roots_time;
  3752   double _start_term;
  3753   double _term_time;
  3755   // Map from young-age-index (0 == not young, 1 is youngest) to
  3756   // surviving words. The base is what we get back from the malloc call.
  3757   size_t* _surviving_young_words_base;
  3758   // this points into the array, as we use the first few entries for padding
  3759   size_t* _surviving_young_words;
  3761 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
  3763   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  3765   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
  3767   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
  3768   CardTableModRefBS* ctbs()                      { return _ct_bs; }
  3770   void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
  3771     if (!from->is_survivor()) {
  3772       _g1_rem->par_write_ref(from, p, tid);
  3776   void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
  3777     // If the new value of the field points back into the same region, or the
  3778     // field lives in a survivor (to-space) region, we don't need an RSet update.
  3779     if (!from->is_in_reserved(*p) && !from->is_survivor()) {
  3780       size_t card_index = ctbs()->index_for(p);
  3781       // If the card hasn't been added to the buffer, do it.
  3782       if (ctbs()->mark_card_deferred(card_index)) {
  3783         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
  3788 public:
  3789   G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
  3790     : _g1h(g1h),
  3791       _refs(g1h->task_queue(queue_num)),
  3792       _dcq(&g1h->dirty_card_queue_set()),
  3793       _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  3794       _g1_rem(g1h->g1_rem_set()),
  3795       _hash_seed(17), _queue_num(queue_num),
  3796       _term_attempts(0),
  3797       _age_table(false),
  3798 #if G1_DETAILED_STATS
  3799       _pushes(0), _pops(0), _steals(0),
  3800       _steal_attempts(0),  _overflow_pushes(0),
  3801 #endif
  3802       _strong_roots_time(0), _term_time(0),
  3803       _alloc_buffer_waste(0), _undo_waste(0)
  3805     // we allocate one more entry than G1YoungSurvRateNumRegions, since
  3806     // we "sacrifice" entry 0 to keep track of surviving bytes for
  3807     // non-young regions (where the age is -1)
  3808     // We also add a few elements at the beginning and at the end in
  3809     // an attempt to eliminate cache contention
  3810     size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
  3811     size_t array_length = PADDING_ELEM_NUM +
  3812                           real_length +
  3813                           PADDING_ELEM_NUM;
  3814     _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  3815     if (_surviving_young_words_base == NULL)
  3816       vm_exit_out_of_memory(array_length * sizeof(size_t),
  3817                             "Not enough space for young surv histo.");
  3818     _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  3819     memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  3821     _overflowed_refs = new OverflowQueue(10);
  3823     _start = os::elapsedTime();
  3826   ~G1ParScanThreadState() {
  3827     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  3830   RefToScanQueue*   refs()            { return _refs;             }
  3831   OverflowQueue*    overflowed_refs() { return _overflowed_refs;  }
  3832   ageTable*         age_table()       { return &_age_table;       }
  3834   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
  3835     return &_alloc_buffers[purpose];
  3838   size_t alloc_buffer_waste()                    { return _alloc_buffer_waste; }
  3839   size_t undo_waste()                            { return _undo_waste; }
  3841   void push_on_queue(oop* ref) {
  3842     assert(ref != NULL, "invariant");
  3843     assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
  3845     if (!refs()->push(ref)) {
  3846       overflowed_refs()->push(ref);
  3847       IF_G1_DETAILED_STATS(note_overflow_push());
  3848     } else {
  3849       IF_G1_DETAILED_STATS(note_push());
  3853   void pop_from_queue(oop*& ref) {
  3854     if (!refs()->pop_local(ref)) {
  3855       ref = NULL;
  3856     } else {
  3857       assert(ref != NULL, "invariant");
  3858       assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
  3859              "invariant");
  3861       IF_G1_DETAILED_STATS(note_pop());
  3865   void pop_from_overflow_queue(oop*& ref) {
  3866     ref = overflowed_refs()->pop();
  3869   int refs_to_scan()                             { return refs()->size();                 }
  3870   int overflowed_refs_to_scan()                  { return overflowed_refs()->length();    }
  3872   void update_rs(HeapRegion* from, oop* p, int tid) {
  3873     if (G1DeferredRSUpdate) {
  3874       deferred_rs_update(from, p, tid);
  3875     } else {
  3876       immediate_rs_update(from, p, tid);
  3880   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
  3882     HeapWord* obj = NULL;
  3883     if (word_sz * 100 <
  3884         (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
  3885                                                   ParallelGCBufferWastePct) {
  3886       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
  3887       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
  3888       alloc_buf->retire(false, false);
  3890       HeapWord* buf =
  3891         _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
  3892       if (buf == NULL) return NULL; // Let caller handle allocation failure.
  3893       // Otherwise.
  3894       alloc_buf->set_buf(buf);
  3896       obj = alloc_buf->allocate(word_sz);
  3897       assert(obj != NULL, "buffer was definitely big enough...");
  3898     } else {
  3899       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
  3901     return obj;
  3904   HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
  3905     HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
  3906     if (obj != NULL) return obj;
  3907     return allocate_slow(purpose, word_sz);
  3910   void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
  3911     if (alloc_buffer(purpose)->contains(obj)) {
  3912       guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
  3913                 "should contain whole object");
  3914       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
  3915     } else {
  3916       CollectedHeap::fill_with_object(obj, word_sz);
  3917       add_to_undo_waste(word_sz);
  3921   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
  3922     _evac_failure_cl = evac_failure_cl;
  3924   OopsInHeapRegionClosure* evac_failure_closure() {
  3925     return _evac_failure_cl;
  3928   void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
  3929     _evac_cl = evac_cl;
  3932   void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
  3933     _partial_scan_cl = partial_scan_cl;
  3936   int* hash_seed() { return &_hash_seed; }
  3937   int  queue_num() { return _queue_num; }
  3939   int term_attempts()   { return _term_attempts; }
  3940   void note_term_attempt()  { _term_attempts++; }
  3942 #if G1_DETAILED_STATS
  3943   int pushes()          { return _pushes; }
  3944   int pops()            { return _pops; }
  3945   int steals()          { return _steals; }
  3946   int steal_attempts()  { return _steal_attempts; }
  3947   int overflow_pushes() { return _overflow_pushes; }
  3949   void note_push()          { _pushes++; }
  3950   void note_pop()           { _pops++; }
  3951   void note_steal()         { _steals++; }
  3952   void note_steal_attempt() { _steal_attempts++; }
  3953   void note_overflow_push() { _overflow_pushes++; }
  3954 #endif
  3956   void start_strong_roots() {
  3957     _start_strong_roots = os::elapsedTime();
  3959   void end_strong_roots() {
  3960     _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  3962   double strong_roots_time() { return _strong_roots_time; }
  3964   void start_term_time() {
  3965     note_term_attempt();
  3966     _start_term = os::elapsedTime();
  3968   void end_term_time() {
  3969     _term_time += (os::elapsedTime() - _start_term);
  3971   double term_time() { return _term_time; }
  3973   double elapsed() {
  3974     return os::elapsedTime() - _start;
  3977   size_t* surviving_young_words() {
  3978     // Callers add one to this so as to skip entry 0, which accumulates
  3979     // surviving words for age -1 regions (i.e. non-young ones)
  3980     return _surviving_young_words;
  3983   void retire_alloc_buffers() {
  3984     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3985       size_t waste = _alloc_buffers[ap].words_remaining();
  3986       add_to_alloc_buffer_waste(waste);
  3987       _alloc_buffers[ap].retire(true, false);
  3991 private:
  3992   void deal_with_reference(oop* ref_to_scan) {
  3993     if (has_partial_array_mask(ref_to_scan)) {
  3994       _partial_scan_cl->do_oop_nv(ref_to_scan);
  3995     } else {
  3996       // Note: we can use "raw" versions of "region_containing" because
  3997       // "ref_to_scan" is definitely in the heap, and is not in a
  3998       // humongous region.
  3999       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
  4000       _evac_cl->set_region(r);
  4001       _evac_cl->do_oop_nv(ref_to_scan);
  4005 public:
  4006   void trim_queue() {
  4007     // I've replicated the loop twice, first to drain the overflow
  4008     // queue, second to drain the task queue. This is better than
  4009     // having a single loop, which checks both conditions and, inside
  4010     // it, either pops the overflow queue or the task queue, as each
  4011     // loop is tighter. Also, the decision to drain the overflow queue
  4012     // first is not arbitrary, as the overflow queue is not visible
  4013     // to the other workers, whereas the task queue is. So, we want to
  4014     // drain the "invisible" entries first, while allowing the other
  4015     // workers to potentially steal the "visible" entries.
  4017     while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
  4018       while (overflowed_refs_to_scan() > 0) {
  4019         oop *ref_to_scan = NULL;
  4020         pop_from_overflow_queue(ref_to_scan);
  4021         assert(ref_to_scan != NULL, "invariant");
  4022         // We shouldn't have pushed it on the queue if it was not
  4023         // pointing into the CSet.
  4024         assert(ref_to_scan != NULL, "sanity");
  4025         assert(has_partial_array_mask(ref_to_scan) ||
  4026                                       _g1h->obj_in_cs(*ref_to_scan), "sanity");
  4028         deal_with_reference(ref_to_scan);
  4031       while (refs_to_scan() > 0) {
  4032         oop *ref_to_scan = NULL;
  4033         pop_from_queue(ref_to_scan);
  4035         if (ref_to_scan != NULL) {
  4036           // We shouldn't have pushed it on the queue if it was not
  4037           // pointing into the CSet.
  4038           assert(has_partial_array_mask(ref_to_scan) ||
  4039                                       _g1h->obj_in_cs(*ref_to_scan), "sanity");
  4041           deal_with_reference(ref_to_scan);
  4046 };
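
// allocate() / allocate_slow() above implement the usual PLAB policy: use the
// thread-local buffer first; when it cannot satisfy a request, refill it only
// if the object is small relative to the buffer (so the discarded tail stays
// within ParallelGCBufferWastePct), otherwise allocate the object directly
// from the shared GC alloc region.  The predicate below isolates just that
// decision; the names and the example percentage are illustrative.
#include <stddef.h>

// True: retire the current buffer and refill it.  False: allocate directly.
static bool sketch_refill_plab(size_t word_sz,
                               size_t plab_word_size,
                               size_t waste_pct /* e.g. 10 */) {
  return word_sz * 100 < plab_word_size * waste_pct;
}
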
  4048 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  4049   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  4050   _par_scan_state(par_scan_state) { }
  4052 // This closure is applied to the fields of the objects that have just been copied.
  4053 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
  4054 void G1ParScanClosure::do_oop_nv(oop* p) {
  4055   oop obj = *p;
  4057   if (obj != NULL) {
  4058     if (_g1->in_cset_fast_test(obj)) {
  4059       // We're not going to even bother checking whether the object is
  4060       // already forwarded or not, as this usually causes an immediate
  4061       // stall. We'll try to prefetch the object (for write, given that
  4062       // we might need to install the forwarding reference) and we'll
  4063       // get back to it when we pop it from the queue
  4064       Prefetch::write(obj->mark_addr(), 0);
  4065       Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
  4067       // slightly paranoid test; I'm trying to catch potential
  4068       // problems before we go into push_on_queue to know where the
  4069       // problem is coming from
  4070       assert(obj == *p, "the value of *p should not have changed");
  4071       _par_scan_state->push_on_queue(p);
  4072     } else {
  4073       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4078 void G1ParCopyHelper::mark_forwardee(oop* p) {
  4079   // This is called _after_ do_oop_work has been called, hence after
  4080   // the object has been relocated to its new location and *p points
  4081   // to its new location.
  4083   oop thisOop = *p;
  4084   if (thisOop != NULL) {
  4085     assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(thisOop)),
  4086            "shouldn't still be in the CSet if evacuation didn't fail.");
  4087     HeapWord* addr = (HeapWord*)thisOop;
  4088     if (_g1->is_in_g1_reserved(addr))
  4089       _cm->grayRoot(oop(addr));
  4093 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
  4094   size_t    word_sz = old->size();
  4095   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  4096   // +1 to make the -1 indexes valid...
  4097   int       young_index = from_region->young_index_in_cset()+1;
  4098   assert( (from_region->is_young() && young_index > 0) ||
  4099           (!from_region->is_young() && young_index == 0), "invariant" );
  4100   G1CollectorPolicy* g1p = _g1->g1_policy();
  4101   markOop m = old->mark();
  4102   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  4103                                            : m->age();
  4104   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  4105                                                              word_sz);
  4106   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  4107   oop       obj     = oop(obj_ptr);
  4109   if (obj_ptr == NULL) {
  4110     // This will either forward-to-self, or detect that someone else has
  4111     // installed a forwarding pointer.
  4112     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  4113     return _g1->handle_evacuation_failure_par(cl, old);
  4116   // We're going to allocate linearly, so might as well prefetch ahead.
  4117   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  4119   oop forward_ptr = old->forward_to_atomic(obj);
  4120   if (forward_ptr == NULL) {
  4121     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  4122     if (g1p->track_object_age(alloc_purpose)) {
  4123       // We could simply do obj->incr_age(). However, this causes a
  4124       // performance issue. obj->incr_age() will first check whether
  4125       // the object has a displaced mark by checking its mark word;
  4126       // getting the mark word from the new location of the object
  4127       // stalls. So, given that we already have the mark word and we
  4128       // are about to install it anyway, it's better to increase the
  4129       // age on the mark word, when the object does not have a
  4130       // displaced mark word. We're not expecting many objects to have
  4131       // a displaced mark word, so that case is not optimized
  4132       // further (it could be...) and we simply call obj->incr_age().
  4134       if (m->has_displaced_mark_helper()) {
  4135         // in this case, we have to install the mark word first,
  4136         // otherwise obj looks to be forwarded (the old mark word,
  4137         // which contains the forward pointer, was copied)
  4138         obj->set_mark(m);
  4139         obj->incr_age();
  4140       } else {
  4141         m = m->incr_age();
  4142         obj->set_mark(m);
  4144       _par_scan_state->age_table()->add(obj, word_sz);
  4145     } else {
  4146       obj->set_mark(m);
  4149     // preserve "next" mark bit
  4150     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  4151       if (!use_local_bitmaps ||
  4152           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
  4153         // if we couldn't mark it on the local bitmap (this happens when
  4154         // the object was not allocated in the GCLab), we have to bite
  4155         // the bullet and do the standard parallel mark
  4156         _cm->markAndGrayObjectIfNecessary(obj);
  4158 #if 1
  4159       if (_g1->isMarkedNext(old)) {
  4160         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
  4162 #endif
  4165     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  4166     surv_young_words[young_index] += word_sz;
  4168     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  4169       arrayOop(old)->set_length(0);
  4170       _par_scan_state->push_on_queue(set_partial_array_mask(old));
  4171     } else {
  4172       // No point in using the slower heap_region_containing() method,
  4173       // given that we know obj is in the heap.
  4174       _scanner->set_region(_g1->heap_region_containing_raw(obj));
  4175       obj->oop_iterate_backwards(_scanner);
  4177   } else {
  4178     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  4179     obj = forward_ptr;
  4181   return obj;
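
// The age-update ordering in copy_to_survivor_space() avoids re-reading the
// mark word out of the freshly copied object (a likely cache stall).  The
// sketch below restates the decision with a toy mark type; the layout is made
// up for illustration and is not the real markOop encoding.
struct SketchMark {
  int  _age;
  bool _displaced;   // true if the age lives in a displaced header
  bool has_displaced_mark() const { return _displaced; }
  SketchMark incr_age() const { SketchMark m = *this; m._age++; return m; }
};

struct SketchCopy {
  SketchMark _mark;
  void set_mark(SketchMark m) { _mark = m; }
  void incr_age()             { _mark = _mark.incr_age(); }  // re-reads _mark
};

static void sketch_install_mark_and_age(SketchCopy* obj, SketchMark m) {
  if (m.has_displaced_mark()) {
    obj->set_mark(m);  // install first so the copy no longer looks forwarded
    obj->incr_age();   // rare case: goes through the displaced header
  } else {
    obj->set_mark(m.incr_age());  // common case: bump age on the local copy
  }
}
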
  4184 template<bool do_gen_barrier, G1Barrier barrier,
  4185          bool do_mark_forwardee, bool skip_cset_test>
  4186 void G1ParCopyClosure<do_gen_barrier, barrier,
  4187                       do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
  4188   oop obj = *p;
  4189   assert(barrier != G1BarrierRS || obj != NULL,
  4190          "Precondition: G1BarrierRS implies obj is nonNull");
  4192   // The only time we skip the cset test is when we're scanning
  4193   // references popped from the queue. And we only push on the queue
  4194   // references that we know point into the cset, so no point in
  4195   // checking again. But we'll leave an assert here for peace of mind.
  4196   assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
  4198   // here the NULL check is implicit in the in_cset_fast_test() call
  4199   if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
  4200 #if G1_REM_SET_LOGGING
  4201     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
  4202                            "into CS.", p, (void*) obj);
  4203 #endif
  4204     if (obj->is_forwarded()) {
  4205       *p = obj->forwardee();
  4206     } else {
  4207       *p = copy_to_survivor_space(obj);
  4209     // When scanning the RS, we only care about objs in CS.
  4210     if (barrier == G1BarrierRS) {
  4211       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4215   // When scanning moved objs, must look at all oops.
  4216   if (barrier == G1BarrierEvac && obj != NULL) {
  4217     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  4220   if (do_gen_barrier && obj != NULL) {
  4221     par_do_barrier(p);
  4225 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
  4227 template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
  4228   oop obj, int start, int end) {
  4229   // process our set of indices (include header in first chunk)
  4230   assert(start < end, "invariant");
  4231   T* const base      = (T*)objArrayOop(obj)->base();
  4232   T* const start_addr = (start == 0) ? (T*) obj : base + start;
  4233   T* const end_addr   = base + end;
  4234   MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
  4235   _scanner.set_region(_g1->heap_region_containing(obj));
  4236   obj->oop_iterate(&_scanner, mr);
  4239 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
  4240   assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
  4241   assert(has_partial_array_mask(p), "invariant");
  4242   oop old = clear_partial_array_mask(p);
  4243   assert(old->is_objArray(), "must be obj array");
  4244   assert(old->is_forwarded(), "must be forwarded");
  4245   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  4247   objArrayOop obj = objArrayOop(old->forwardee());
  4248   assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  4249   // Process ParGCArrayScanChunk elements now
  4250   // and push the remainder back onto queue
  4251   int start     = arrayOop(old)->length();
  4252   int end       = obj->length();
  4253   int remainder = end - start;
  4254   assert(start <= end, "just checking");
  4255   if (remainder > 2 * ParGCArrayScanChunk) {
  4256     // Test above combines last partial chunk with a full chunk
  4257     end = start + ParGCArrayScanChunk;
  4258     arrayOop(old)->set_length(end);
  4259     // Push remainder.
  4260     _par_scan_state->push_on_queue(set_partial_array_mask(old));
  4261   } else {
  4262     // Restore length so that the heap remains parsable in
  4263     // case of evacuation failure.
  4264     arrayOop(old)->set_length(end);
  4267   // process our set of indices (include header in first chunk)
  4268   process_array_chunk<oop>(obj, start, end);
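
// The chunking above uses the old copy's length field as a cursor: [start,
// end) is the slice still to be scanned; if more than two chunks remain, one
// ParGCArrayScanChunk-sized slice is taken now and the remainder is pushed
// back on the queue.  The sketch below isolates that index arithmetic with
// plain ints; the names are illustrative.
struct SketchArraySlice {
  int  scan_from;    // first index to scan in this step
  int  scan_to;      // one past the last index to scan in this step
  bool push_again;   // true if a remainder goes back on the queue
};

static SketchArraySlice sketch_next_array_chunk(int start, int end, int chunk) {
  SketchArraySlice s;
  s.scan_from = start;
  if (end - start > 2 * chunk) {
    s.scan_to    = start + chunk;   // take one chunk, leave the rest
    s.push_again = true;            // the old copy's length becomes scan_to
  } else {
    s.scan_to    = end;             // last (possibly double-sized) chunk
    s.push_again = false;
  }
  return s;
}
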
  4271 int G1ScanAndBalanceClosure::_nq = 0;
  4273 class G1ParEvacuateFollowersClosure : public VoidClosure {
  4274 protected:
  4275   G1CollectedHeap*              _g1h;
  4276   G1ParScanThreadState*         _par_scan_state;
  4277   RefToScanQueueSet*            _queues;
  4278   ParallelTaskTerminator*       _terminator;
  4280   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  4281   RefToScanQueueSet*      queues()         { return _queues; }
  4282   ParallelTaskTerminator* terminator()     { return _terminator; }
  4284 public:
  4285   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  4286                                 G1ParScanThreadState* par_scan_state,
  4287                                 RefToScanQueueSet* queues,
  4288                                 ParallelTaskTerminator* terminator)
  4289     : _g1h(g1h), _par_scan_state(par_scan_state),
  4290       _queues(queues), _terminator(terminator) {}
  4292   void do_void() {
  4293     G1ParScanThreadState* pss = par_scan_state();
  4294     while (true) {
  4295       oop* ref_to_scan;
  4296       pss->trim_queue();
  4297       IF_G1_DETAILED_STATS(pss->note_steal_attempt());
  4298       if (queues()->steal(pss->queue_num(),
  4299                           pss->hash_seed(),
  4300                           ref_to_scan)) {
  4301         IF_G1_DETAILED_STATS(pss->note_steal());
  4303         // slightly paranoid tests; I'm trying to catch potential
  4304         // problems before we go into push_on_queue to know where the
  4305         // problem is coming from
  4306         assert(ref_to_scan != NULL, "invariant");
  4307         assert(has_partial_array_mask(ref_to_scan) ||
  4308                                    _g1h->obj_in_cs(*ref_to_scan), "invariant");
  4309         pss->push_on_queue(ref_to_scan);
  4310         continue;
  4312       pss->start_term_time();
  4313       if (terminator()->offer_termination()) break;
  4314       pss->end_term_time();
  4316     pss->end_term_time();
  4317     pss->retire_alloc_buffers();
  4319 };
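
// do_void() above is the standard work-stealing drain: empty the local
// queues, try to steal from another worker, and only offer termination when
// stealing fails.  The structural sketch below keeps that shape with toy
// interfaces standing in for the task queues and the terminator; it is
// illustrative only.
#include <stddef.h>

struct SketchWorker {
  virtual void trim_local_queue()   = 0;  // drain everything we own
  virtual bool steal(void*& task)   = 0;  // true if a task was stolen
  virtual void process(void* task)  = 0;  // the real code re-pushes and trims
  virtual bool offer_termination()  = 0;  // true once all workers agree
  virtual ~SketchWorker() {}
};

static void sketch_evacuate_followers(SketchWorker* w) {
  while (true) {
    w->trim_local_queue();         // first, empty our own queues
    void* task = NULL;
    if (w->steal(task)) {          // then look for someone else's work
      w->process(task);
      continue;                    // and go back to draining
    }
    if (w->offer_termination()) {  // no work anywhere: try to terminate
      break;
    }                              // termination refused: look again
  }
}
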
  4321 class G1ParTask : public AbstractGangTask {
  4322 protected:
  4323   G1CollectedHeap*       _g1h;
  4324   RefToScanQueueSet      *_queues;
  4325   ParallelTaskTerminator _terminator;
  4327   Mutex _stats_lock;
  4328   Mutex* stats_lock() { return &_stats_lock; }
  4330   size_t getNCards() {
  4331     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  4332       / G1BlockOffsetSharedArray::N_bytes;
  4335 public:
  4336   G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
  4337     : AbstractGangTask("G1 collection"),
  4338       _g1h(g1h),
  4339       _queues(task_queues),
  4340       _terminator(workers, _queues),
  4341       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  4342   {}
  4344   RefToScanQueueSet* queues() { return _queues; }
  4346   RefToScanQueue *work_queue(int i) {
  4347     return queues()->queue(i);
  4350   void work(int i) {
  4351     ResourceMark rm;
  4352     HandleMark   hm;
  4354     G1ParScanThreadState            pss(_g1h, i);
  4355     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  4356     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  4357     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  4359     pss.set_evac_closure(&scan_evac_cl);
  4360     pss.set_evac_failure_closure(&evac_failure_cl);
  4361     pss.set_partial_scan_closure(&partial_scan_cl);
  4363     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  4364     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  4365     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  4367     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  4368     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  4369     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  4371     OopsInHeapRegionClosure        *scan_root_cl;
  4372     OopsInHeapRegionClosure        *scan_perm_cl;
  4373     OopsInHeapRegionClosure        *scan_so_cl;
  4375     if (_g1h->g1_policy()->should_initiate_conc_mark()) {
  4376       scan_root_cl = &scan_mark_root_cl;
  4377       scan_perm_cl = &scan_mark_perm_cl;
  4378       scan_so_cl   = &scan_mark_heap_rs_cl;
  4379     } else {
  4380       scan_root_cl = &only_scan_root_cl;
  4381       scan_perm_cl = &only_scan_perm_cl;
  4382       scan_so_cl   = &only_scan_heap_rs_cl;
  4385     pss.start_strong_roots();
  4386     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  4387                                   SharedHeap::SO_AllClasses,
  4388                                   scan_root_cl,
  4389                                   &only_scan_heap_rs_cl,
  4390                                   scan_so_cl,
  4391                                   scan_perm_cl,
  4392                                   i);
  4393     pss.end_strong_roots();
  4395       double start = os::elapsedTime();
  4396       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  4397       evac.do_void();
  4398       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  4399       double term_ms = pss.term_time()*1000.0;
  4400       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  4401       _g1h->g1_policy()->record_termination_time(i, term_ms);
  4403     if (G1UseSurvivorSpaces) {
  4404       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  4406     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  4408     // Clean up any par-expanded rem sets.
  4409     HeapRegionRemSet::par_cleanup();
  4411     MutexLocker x(stats_lock());
  4412     if (ParallelGCVerbose) {
  4413       gclog_or_tty->print("Thread %d complete:\n", i);
  4414 #if G1_DETAILED_STATS
   4415       gclog_or_tty->print("  Pushes: %7d    Pops: %7d   Overflows: %7d   Steals: %7d (in %d attempts)\n",
  4416                           pss.pushes(),
  4417                           pss.pops(),
  4418                           pss.overflow_pushes(),
  4419                           pss.steals(),
  4420                           pss.steal_attempts());
  4421 #endif
  4422       double elapsed      = pss.elapsed();
  4423       double strong_roots = pss.strong_roots_time();
  4424       double term         = pss.term_time();
  4425       gclog_or_tty->print("  Elapsed: %7.2f ms.\n"
  4426                           "    Strong roots: %7.2f ms (%6.2f%%)\n"
  4427                           "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
  4428                           elapsed * 1000.0,
  4429                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
  4430                           term * 1000.0, (term*100.0/elapsed),
  4431                           pss.term_attempts());
  4432       size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
  4433       gclog_or_tty->print("  Waste: %8dK\n"
  4434                  "    Alloc Buffer: %8dK\n"
  4435                  "    Undo: %8dK\n",
  4436                  (total_waste * HeapWordSize) / K,
  4437                  (pss.alloc_buffer_waste() * HeapWordSize) / K,
  4438                  (pss.undo_waste() * HeapWordSize) / K);
  4441     assert(pss.refs_to_scan() == 0, "Task queue should be empty");
  4442     assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
  4444 };
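// G1ParTask::work() above is the per-worker body of an evacuation pause:
// it sets up a G1ParScanThreadState and its evacuation closures, selects
// either the plain or the scan-and-mark root closures depending on
// whether this pause also initiates concurrent marking, processes the
// strong roots, drains and steals work via G1ParEvacuateFollowersClosure,
// and finally records per-thread timings (plus detailed statistics under
// stats_lock() when ParallelGCVerbose is set).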
  4446 // *** Common G1 Evacuation Stuff
  4448 class G1CountClosure: public OopsInHeapRegionClosure {
  4449 public:
  4450   int n;
  4451   G1CountClosure() : n(0) {}
  4452   void do_oop(narrowOop* p) {
  4453     guarantee(false, "NYI");
  4455   void do_oop(oop* p) {
  4456     oop obj = *p;
  4457     assert(obj != NULL && G1CollectedHeap::heap()->obj_in_cs(obj),
  4458            "Rem set closure called on non-rem-set pointer.");
  4459     n++;
  4461 };
  4463 static G1CountClosure count_closure;
  4465 void
  4466 G1CollectedHeap::
  4467 g1_process_strong_roots(bool collecting_perm_gen,
  4468                         SharedHeap::ScanningOption so,
  4469                         OopClosure* scan_non_heap_roots,
  4470                         OopsInHeapRegionClosure* scan_rs,
  4471                         OopsInHeapRegionClosure* scan_so,
  4472                         OopsInGenClosure* scan_perm,
  4473                         int worker_i) {
  4474   // First scan the strong roots, including the perm gen.
  4475   double ext_roots_start = os::elapsedTime();
  4476   double closure_app_time_sec = 0.0;
  4478   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  4479   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  4480   buf_scan_perm.set_generation(perm_gen());
  4482   process_strong_roots(collecting_perm_gen, so,
  4483                        &buf_scan_non_heap_roots,
  4484                        &buf_scan_perm);
  4485   // Finish up any enqueued closure apps.
  4486   buf_scan_non_heap_roots.done();
  4487   buf_scan_perm.done();
  4488   double ext_roots_end = os::elapsedTime();
  4489   g1_policy()->reset_obj_copy_time(worker_i);
  4490   double obj_copy_time_sec =
  4491     buf_scan_non_heap_roots.closure_app_seconds() +
  4492     buf_scan_perm.closure_app_seconds();
  4493   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  4494   double ext_root_time_ms =
  4495     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  4496   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
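  // Note on the accounting above: the buffering closures time only the
  // application of the buffered oops (i.e. object copying), so
  // subtracting closure_app_seconds() from the elapsed wall time leaves
  // the pure external-root scanning time recorded here.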
  4498   // Scan strong roots in mark stack.
  4499   if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
  4500     concurrent_mark()->oops_do(scan_non_heap_roots);
  4502   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  4503   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
  4505   // XXX What should this be doing in the parallel case?
  4506   g1_policy()->record_collection_pause_end_CH_strong_roots();
  4507   if (scan_so != NULL) {
  4508     scan_scan_only_set(scan_so, worker_i);
  4510   // Now scan the complement of the collection set.
  4511   if (scan_rs != NULL) {
  4512     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  4514   // Finish with the ref_processor roots.
  4515   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4516     ref_processor()->oops_do(scan_non_heap_roots);
  4518   g1_policy()->record_collection_pause_end_G1_strong_roots();
  4519   _process_strong_tasks->all_tasks_completed();
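// scan_scan_only_region() below applies the given closure to every
// object in a single scan-only region, walking from bottom() to top()
// with oop_iterate(); it also checks, via a guarantee, that the region's
// next_top_at_mark_start() is still at bottom().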
  4522 void
  4523 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
  4524                                        OopsInHeapRegionClosure* oc,
  4525                                        int worker_i) {
  4526   HeapWord* startAddr = r->bottom();
  4527   HeapWord* endAddr = r->used_region().end();
  4529   oc->set_region(r);
  4531   HeapWord* p = r->bottom();
  4532   HeapWord* t = r->top();
  4533   guarantee( p == r->next_top_at_mark_start(), "invariant" );
  4534   while (p < t) {
  4535     oop obj = oop(p);
  4536     p += obj->oop_iterate(oc);
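// scan_scan_only_set() below drains the young list's scan-only regions:
// each worker pulls regions with par_get_next_scan_only_region(), wraps
// the caller's closure in a buffering closure, and uses the
// filter-and-mark variant when this pause also initiates concurrent
// marking. Closure application time is charged to object copy; the
// remainder is recorded as scan-only time.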
  4540 void
  4541 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
  4542                                     int worker_i) {
  4543   double start = os::elapsedTime();
  4545   BufferingOopsInHeapRegionClosure boc(oc);
  4547   FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
  4548   FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
  4550   OopsInHeapRegionClosure *foc;
  4551   if (g1_policy()->should_initiate_conc_mark())
  4552     foc = &scan_and_mark;
  4553   else
  4554     foc = &scan_only;
  4556   HeapRegion* hr;
  4557   int n = 0;
  4558   while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
  4559     scan_scan_only_region(hr, foc, worker_i);
  4560     ++n;
  4562   boc.done();
  4564   double closure_app_s = boc.closure_app_seconds();
  4565   g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
  4566   double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
  4567   g1_policy()->record_scan_only_time(worker_i, ms, n);
  4570 void
  4571 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  4572                                        OopClosure* non_root_closure) {
  4573   SharedHeap::process_weak_roots(root_closure, non_root_closure);
  4577 class SaveMarksClosure: public HeapRegionClosure {
  4578 public:
  4579   bool doHeapRegion(HeapRegion* r) {
  4580     r->save_marks();
  4581     return false;
  4583 };
  4585 void G1CollectedHeap::save_marks() {
  4586   if (ParallelGCThreads == 0) {
  4587     SaveMarksClosure sm;
  4588     heap_region_iterate(&sm);
  4590   // We do this even in the parallel case
  4591   perm_gen()->save_marks();
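// evacuate_collection_set() below drives the evacuation phase of a
// pause. In outline (a sketch of the steps visible in the code, not a
// specification):
//   1. prepare the remembered set and disable the refinement cache;
//   2. run G1ParTask on the work gang (or inline when ParallelGCThreads
//      is 0) and record the parallel time;
//   3. retire the GC alloc regions and process the JNI weak handles;
//   4. complete marking in the collection set and, if evacuation failed,
//      remove self-forwarding pointers;
//   5. with G1DeferredRSUpdate, redirty the logged cards and merge them
//      back into the JavaThread dirty card queue set.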
  4594 void G1CollectedHeap::evacuate_collection_set() {
  4595   set_evacuation_failed(false);
  4597   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4598   concurrent_g1_refine()->set_use_cache(false);
  4599   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  4600   set_par_threads(n_workers);
  4601   G1ParTask g1_par_task(this, n_workers, _task_queues);
  4603   init_for_evac_failure(NULL);
  4605   change_strong_roots_parity();  // In preparation for parallel strong roots.
  4606   rem_set()->prepare_for_younger_refs_iterate(true);
  4608   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4609   double start_par = os::elapsedTime();
  4610   if (ParallelGCThreads > 0) {
  4611     // The individual threads will set their evac-failure closures.
  4612     workers()->run_task(&g1_par_task);
  4613   } else {
  4614     g1_par_task.work(0);
  4617   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4618   g1_policy()->record_par_time(par_time);
  4619   set_par_threads(0);
  4620   // Is this the right thing to do here?  We don't save marks
  4621   // on individual heap regions when we allocate from
  4622   // them in parallel, so this seems like the correct place for this.
  4623   retire_all_alloc_regions();
  4625     G1IsAliveClosure is_alive(this);
  4626     G1KeepAliveClosure keep_alive(this);
  4627     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4629   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4631   concurrent_g1_refine()->set_use_cache(true);
  4633   finalize_for_evac_failure();
  4635   // Must do this before removing self-forwarding pointers, which clears
  4636   // the per-region evac-failure flags.
  4637   concurrent_mark()->complete_marking_in_collection_set();
  4639   if (evacuation_failed()) {
  4640     remove_self_forwarding_pointers();
  4641     if (PrintGCDetails) {
  4642       gclog_or_tty->print(" (evacuation failed)");
  4643     } else if (PrintGC) {
  4644       gclog_or_tty->print("--");
  4648   if (G1DeferredRSUpdate) {
  4649     RedirtyLoggedCardTableEntryFastClosure redirty;
  4650     dirty_card_queue_set().set_closure(&redirty);
  4651     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4652     JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
  4653     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4656   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
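// free_region() below computes a region's garbage (used bytes minus max
// live bytes), informs the policy that this much known garbage went
// away, and then delegates the actual freeing to free_region_work() and
// finish_free_region_work() via a local UncleanRegionList and
// by-reference counters.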
  4659 void G1CollectedHeap::free_region(HeapRegion* hr) {
  4660   size_t pre_used = 0;
  4661   size_t cleared_h_regions = 0;
  4662   size_t freed_regions = 0;
  4663   UncleanRegionList local_list;
  4665   HeapWord* start = hr->bottom();
  4666   HeapWord* end   = hr->prev_top_at_mark_start();
  4667   size_t used_bytes = hr->used();
  4668   size_t live_bytes = hr->max_live_bytes();
  4669   if (used_bytes > 0) {
  4670     guarantee( live_bytes <= used_bytes, "invariant" );
  4671   } else {
  4672     guarantee( live_bytes == 0, "invariant" );
  4675   size_t garbage_bytes = used_bytes - live_bytes;
  4676   if (garbage_bytes > 0)
  4677     g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
  4679   free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
  4680                    &local_list);
  4681   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  4682                           &local_list);
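// For humongous objects only the "starts humongous" region is passed to
// free_region_work() below; the trailing "continues humongous" regions
// are found by walking forward in the region sequence until a region
// that does not continue the same object is reached, and each one is
// cleared and put on the unclean list as well.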
  4685 void
  4686 G1CollectedHeap::free_region_work(HeapRegion* hr,
  4687                                   size_t& pre_used,
  4688                                   size_t& cleared_h_regions,
  4689                                   size_t& freed_regions,
  4690                                   UncleanRegionList* list,
  4691                                   bool par) {
  4692   pre_used += hr->used();
  4693   if (hr->isHumongous()) {
  4694     assert(hr->startsHumongous(),
  4695            "Only the start of a humongous region should be freed.");
  4696     int ind = _hrs->find(hr);
  4697     assert(ind != -1, "Should have an index.");
  4698     // Clear the start region.
  4699     hr->hr_clear(par, true /*clear_space*/);
  4700     list->insert_before_head(hr);
  4701     cleared_h_regions++;
  4702     freed_regions++;
  4703     // Clear any continued regions.
  4704     ind++;
  4705     while ((size_t)ind < n_regions()) {
  4706       HeapRegion* hrc = _hrs->at(ind);
  4707       if (!hrc->continuesHumongous()) break;
  4708       // Otherwise, does continue the H region.
  4709       assert(hrc->humongous_start_region() == hr, "Huh?");
  4710       hrc->hr_clear(par, true /*clear_space*/);
  4711       cleared_h_regions++;
  4712       freed_regions++;
  4713       list->insert_before_head(hrc);
  4714       ind++;
  4716   } else {
  4717     hr->hr_clear(par, true /*clear_space*/);
  4718     list->insert_before_head(hr);
  4719     freed_regions++;
  4720     // If we're using clear2, this should not be enabled.
  4721     // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
  4725 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
  4726                                               size_t cleared_h_regions,
  4727                                               size_t freed_regions,
  4728                                               UncleanRegionList* list) {
  4729   if (list != NULL && list->sz() > 0) {
  4730     prepend_region_list_on_unclean_list(list);
  4732   // Acquire a lock, if we're parallel, to update possibly-shared
  4733   // variables.
  4734   Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
  4736     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  4737     _summary_bytes_used -= pre_used;
  4738     _num_humongous_regions -= (int) cleared_h_regions;
  4739     _free_regions += freed_regions;
  4744 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  4745   while (list != NULL) {
  4746     guarantee( list->is_young(), "invariant" );
  4748     HeapWord* bottom = list->bottom();
  4749     HeapWord* end = list->end();
  4750     MemRegion mr(bottom, end);
  4751     ct_bs->dirty(mr);
  4753     list = list->get_next_young_region();
  4758 class G1ParCleanupCTTask : public AbstractGangTask {
  4759   CardTableModRefBS* _ct_bs;
  4760   G1CollectedHeap* _g1h;
  4761 public:
  4762   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  4763                      G1CollectedHeap* g1h) :
  4764     AbstractGangTask("G1 Par Cleanup CT Task"),
  4765     _ct_bs(ct_bs),
  4766     _g1h(g1h)
  4767   { }
  4769   void work(int i) {
  4770     HeapRegion* r;
   4771     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  4772       clear_cards(r);
  4775   void clear_cards(HeapRegion* r) {
  4776     // Cards for Survivor and Scan-Only regions will be dirtied later.
  4777     if (!r->is_scan_only() && !r->is_survivor()) {
  4778       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  4781 };
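// cleanUpCardTable() below clears the card table for the regions on the
// dirty-cards-region list, either through the gang task above or
// serially, and then re-dirties the cards of the scan-only and survivor
// regions (see the comment in the code below for why that order was
// chosen).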
  4784 void G1CollectedHeap::cleanUpCardTable() {
  4785   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  4786   double start = os::elapsedTime();
  4788   // Iterate over the dirty cards region list.
  4789   G1ParCleanupCTTask cleanup_task(ct_bs, this);
  4790   if (ParallelGCThreads > 0) {
  4791     set_par_threads(workers()->total_workers());
  4792     workers()->run_task(&cleanup_task);
  4793     set_par_threads(0);
  4794   } else {
  4795     while (_dirty_cards_region_list) {
  4796       HeapRegion* r = _dirty_cards_region_list;
  4797       cleanup_task.clear_cards(r);
  4798       _dirty_cards_region_list = r->get_next_dirty_cards_region();
  4799       if (_dirty_cards_region_list == r) {
  4800         // The last region.
  4801         _dirty_cards_region_list = NULL;
  4803       r->set_next_dirty_cards_region(NULL);
  4806   // now, redirty the cards of the scan-only and survivor regions
  4807   // (it seemed faster to do it this way, instead of iterating over
  4808   // all regions and then clearing / dirtying as appropriate)
  4809   dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
  4810   dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  4812   double elapsed = os::elapsedTime() - start;
  4813   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  4817 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
  4818   if (g1_policy()->should_do_collection_pause(word_size)) {
  4819     do_collection_pause();
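// free_collection_set() below walks the collection set list, splitting
// the elapsed time into young and non-young segments as the region type
// changes. For each region it accumulates remembered-set lengths and,
// for young regions, records the surviving words; regions that evacuated
// successfully are freed, while regions that failed evacuation have
// their survivor-rate group uninstalled and their young /
// evacuation-failed state cleared.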
  4823 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  4824   double young_time_ms     = 0.0;
  4825   double non_young_time_ms = 0.0;
  4827   G1CollectorPolicy* policy = g1_policy();
  4829   double start_sec = os::elapsedTime();
  4830   bool non_young = true;
  4832   HeapRegion* cur = cs_head;
  4833   int age_bound = -1;
  4834   size_t rs_lengths = 0;
  4836   while (cur != NULL) {
  4837     if (non_young) {
  4838       if (cur->is_young()) {
  4839         double end_sec = os::elapsedTime();
  4840         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4841         non_young_time_ms += elapsed_ms;
  4843         start_sec = os::elapsedTime();
  4844         non_young = false;
  4846     } else {
  4847       if (!cur->is_on_free_list()) {
  4848         double end_sec = os::elapsedTime();
  4849         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4850         young_time_ms += elapsed_ms;
  4852         start_sec = os::elapsedTime();
  4853         non_young = true;
  4857     rs_lengths += cur->rem_set()->occupied();
  4859     HeapRegion* next = cur->next_in_collection_set();
  4860     assert(cur->in_collection_set(), "bad CS");
  4861     cur->set_next_in_collection_set(NULL);
  4862     cur->set_in_collection_set(false);
  4864     if (cur->is_young()) {
  4865       int index = cur->young_index_in_cset();
  4866       guarantee( index != -1, "invariant" );
  4867       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
  4868       size_t words_survived = _surviving_young_words[index];
  4869       cur->record_surv_words_in_group(words_survived);
  4870     } else {
  4871       int index = cur->young_index_in_cset();
  4872       guarantee( index == -1, "invariant" );
  4875     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  4876             (!cur->is_young() && cur->young_index_in_cset() == -1),
  4877             "invariant" );
  4879     if (!cur->evacuation_failed()) {
   4880       // And the region is not empty: empty regions should never be in a CS.
  4881       assert(!cur->is_empty(),
  4882              "Should not have empty regions in a CS.");
  4883       free_region(cur);
  4884     } else {
  4885       guarantee( !cur->is_scan_only(), "should not be scan only" );
  4886       cur->uninstall_surv_rate_group();
  4887       if (cur->is_young())
  4888         cur->set_young_index_in_cset(-1);
  4889       cur->set_not_young();
  4890       cur->set_evacuation_failed(false);
  4892     cur = next;
  4895   policy->record_max_rs_lengths(rs_lengths);
  4896   policy->cset_regions_freed();
  4898   double end_sec = os::elapsedTime();
  4899   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4900   if (non_young)
  4901     non_young_time_ms += elapsed_ms;
  4902   else
  4903     young_time_ms += elapsed_ms;
  4905   policy->record_young_free_cset_time_ms(young_time_ms);
  4906   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
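// The functions below manage the two region lists guarded by ZF_mon: the
// unclean list (regions still waiting to be zero-filled, serviced by the
// ZF thread) and the free list (regions whose zero_fill_state() is
// ZeroFilled). Callers either hold ZF_mon and use the *_locked variants
// directly, or use the wrappers that take the lock with
// _no_safepoint_check_flag; should_zf() decides when the ZF thread is
// woken via ZF_mon->notify_all().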
  4909 HeapRegion*
  4910 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  4911   assert(ZF_mon->owned_by_self(), "Precondition");
  4912   HeapRegion* res = pop_unclean_region_list_locked();
  4913   if (res != NULL) {
  4914     assert(!res->continuesHumongous() &&
  4915            res->zero_fill_state() != HeapRegion::Allocated,
  4916            "Only free regions on unclean list.");
  4917     if (zero_filled) {
  4918       res->ensure_zero_filled_locked();
  4919       res->set_zero_fill_allocated();
  4922   return res;
  4925 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
  4926   MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
  4927   return alloc_region_from_unclean_list_locked(zero_filled);
  4930 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
  4931   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4932   put_region_on_unclean_list_locked(r);
  4933   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  4936 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
  4937   MutexLockerEx x(Cleanup_mon);
  4938   set_unclean_regions_coming_locked(b);
  4941 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  4942   assert(Cleanup_mon->owned_by_self(), "Precondition");
  4943   _unclean_regions_coming = b;
  4944   // Wake up mutator threads that might be waiting for completeCleanup to
  4945   // finish.
  4946   if (!b) Cleanup_mon->notify_all();
  4949 void G1CollectedHeap::wait_for_cleanup_complete() {
  4950   MutexLockerEx x(Cleanup_mon);
  4951   wait_for_cleanup_complete_locked();
  4954 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  4955   assert(Cleanup_mon->owned_by_self(), "precondition");
  4956   while (_unclean_regions_coming) {
  4957     Cleanup_mon->wait();
  4961 void
  4962 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  4963   assert(ZF_mon->owned_by_self(), "precondition.");
  4964   _unclean_region_list.insert_before_head(r);
  4967 void
  4968 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
  4969   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4970   prepend_region_list_on_unclean_list_locked(list);
  4971   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  4974 void
  4975 G1CollectedHeap::
  4976 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  4977   assert(ZF_mon->owned_by_self(), "precondition.");
  4978   _unclean_region_list.prepend_list(list);
  4981 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
  4982   assert(ZF_mon->owned_by_self(), "precondition.");
  4983   HeapRegion* res = _unclean_region_list.pop();
  4984   if (res != NULL) {
  4985     // Inform ZF thread that there's a new unclean head.
  4986     if (_unclean_region_list.hd() != NULL && should_zf())
  4987       ZF_mon->notify_all();
  4989   return res;
  4992 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  4993   assert(ZF_mon->owned_by_self(), "precondition.");
  4994   return _unclean_region_list.hd();
  4998 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
  4999   assert(ZF_mon->owned_by_self(), "Precondition");
  5000   HeapRegion* r = peek_unclean_region_list_locked();
  5001   if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
  5002     // Result of below must be equal to "r", since we hold the lock.
  5003     (void)pop_unclean_region_list_locked();
  5004     put_free_region_on_list_locked(r);
  5005     return true;
  5006   } else {
  5007     return false;
  5011 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  5012   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5013   return move_cleaned_region_to_free_list_locked();
  5017 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  5018   assert(ZF_mon->owned_by_self(), "precondition.");
  5019   assert(_free_region_list_size == free_region_list_length(), "Inv");
  5020   assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
  5021         "Regions on free list must be zero filled");
  5022   assert(!r->isHumongous(), "Must not be humongous.");
  5023   assert(r->is_empty(), "Better be empty");
  5024   assert(!r->is_on_free_list(),
  5025          "Better not already be on free list");
  5026   assert(!r->is_on_unclean_list(),
  5027          "Better not already be on unclean list");
  5028   r->set_on_free_list(true);
  5029   r->set_next_on_free_list(_free_region_list);
  5030   _free_region_list = r;
  5031   _free_region_list_size++;
  5032   assert(_free_region_list_size == free_region_list_length(), "Inv");
  5035 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  5036   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5037   put_free_region_on_list_locked(r);
  5040 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  5041   assert(ZF_mon->owned_by_self(), "precondition.");
  5042   assert(_free_region_list_size == free_region_list_length(), "Inv");
  5043   HeapRegion* res = _free_region_list;
  5044   if (res != NULL) {
  5045     _free_region_list = res->next_from_free_list();
  5046     _free_region_list_size--;
  5047     res->set_on_free_list(false);
  5048     res->set_next_on_free_list(NULL);
  5049     assert(_free_region_list_size == free_region_list_length(), "Inv");
  5051   return res;
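// alloc_free_region_from_lists() below tries at most two sources: the
// free list (immediately when a zero-filled region was requested,
// otherwise only on the second pass) and the unclean list. A region
// taken from the free list is marked zero-fill-allocated, and the loop
// returns NULL once both sources have been tried unsuccessfully.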
  5055 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  5056   // By self, or on behalf of self.
  5057   assert(Heap_lock->is_locked(), "Precondition");
  5058   HeapRegion* res = NULL;
  5059   bool first = true;
  5060   while (res == NULL) {
  5061     if (zero_filled || !first) {
  5062       MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5063       res = pop_free_region_list_locked();
  5064       if (res != NULL) {
  5065         assert(!res->zero_fill_is_allocated(),
  5066                "No allocated regions on free list.");
  5067         res->set_zero_fill_allocated();
  5068       } else if (!first) {
  5069         break;  // We tried both, time to return NULL.
  5073     if (res == NULL) {
  5074       res = alloc_region_from_unclean_list(zero_filled);
  5076     assert(res == NULL ||
  5077            !zero_filled ||
  5078            res->zero_fill_is_allocated(),
  5079            "We must have allocated the region we're returning");
  5080     first = false;
  5082   return res;
  5085 void G1CollectedHeap::remove_allocated_regions_from_lists() {
  5086   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5088     HeapRegion* prev = NULL;
  5089     HeapRegion* cur = _unclean_region_list.hd();
  5090     while (cur != NULL) {
  5091       HeapRegion* next = cur->next_from_unclean_list();
  5092       if (cur->zero_fill_is_allocated()) {
  5093         // Remove from the list.
  5094         if (prev == NULL) {
  5095           (void)_unclean_region_list.pop();
  5096         } else {
  5097           _unclean_region_list.delete_after(prev);
  5099         cur->set_on_unclean_list(false);
  5100         cur->set_next_on_unclean_list(NULL);
  5101       } else {
  5102         prev = cur;
  5104       cur = next;
  5106     assert(_unclean_region_list.sz() == unclean_region_list_length(),
  5107            "Inv");
  5111     HeapRegion* prev = NULL;
  5112     HeapRegion* cur = _free_region_list;
  5113     while (cur != NULL) {
  5114       HeapRegion* next = cur->next_from_free_list();
  5115       if (cur->zero_fill_is_allocated()) {
  5116         // Remove from the list.
  5117         if (prev == NULL) {
  5118           _free_region_list = cur->next_from_free_list();
  5119         } else {
  5120           prev->set_next_on_free_list(cur->next_from_free_list());
  5122         cur->set_on_free_list(false);
  5123         cur->set_next_on_free_list(NULL);
  5124         _free_region_list_size--;
  5125       } else {
  5126         prev = cur;
  5128       cur = next;
  5130     assert(_free_region_list_size == free_region_list_length(), "Inv");
  5134 bool G1CollectedHeap::verify_region_lists() {
  5135   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5136   return verify_region_lists_locked();
  5139 bool G1CollectedHeap::verify_region_lists_locked() {
  5140   HeapRegion* unclean = _unclean_region_list.hd();
  5141   while (unclean != NULL) {
  5142     guarantee(unclean->is_on_unclean_list(), "Well, it is!");
  5143     guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
  5144     guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
  5145               "Everything else is possible.");
  5146     unclean = unclean->next_from_unclean_list();
  5148   guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
  5150   HeapRegion* free_r = _free_region_list;
  5151   while (free_r != NULL) {
  5152     assert(free_r->is_on_free_list(), "Well, it is!");
  5153     assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
  5154     switch (free_r->zero_fill_state()) {
  5155     case HeapRegion::NotZeroFilled:
  5156     case HeapRegion::ZeroFilling:
  5157       guarantee(false, "Should not be on free list.");
  5158       break;
  5159     default:
  5160       // Everything else is possible.
  5161       break;
  5163     free_r = free_r->next_from_free_list();
  5165   guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  5166   // If we didn't do an assertion...
  5167   return true;
  5170 size_t G1CollectedHeap::free_region_list_length() {
  5171   assert(ZF_mon->owned_by_self(), "precondition.");
  5172   size_t len = 0;
  5173   HeapRegion* cur = _free_region_list;
  5174   while (cur != NULL) {
  5175     len++;
  5176     cur = cur->next_from_free_list();
  5178   return len;
  5181 size_t G1CollectedHeap::unclean_region_list_length() {
  5182   assert(ZF_mon->owned_by_self(), "precondition.");
  5183   return _unclean_region_list.length();
  5186 size_t G1CollectedHeap::n_regions() {
  5187   return _hrs->length();
  5190 size_t G1CollectedHeap::max_regions() {
  5191   return
  5192     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  5193     HeapRegion::GrainBytes;
  5196 size_t G1CollectedHeap::free_regions() {
  5197   /* Possibly-expensive assert.
  5198   assert(_free_regions == count_free_regions(),
  5199          "_free_regions is off.");
  5200   */
  5201   return _free_regions;
  5204 bool G1CollectedHeap::should_zf() {
  5205   return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
  5208 class RegionCounter: public HeapRegionClosure {
  5209   size_t _n;
  5210 public:
  5211   RegionCounter() : _n(0) {}
  5212   bool doHeapRegion(HeapRegion* r) {
  5213     if (r->is_empty()) {
  5214       assert(!r->isHumongous(), "H regions should not be empty.");
  5215       _n++;
  5217     return false;
  5219   int res() { return (int) _n; }
  5220 };
  5222 size_t G1CollectedHeap::count_free_regions() {
  5223   RegionCounter rc;
  5224   heap_region_iterate(&rc);
  5225   size_t n = rc.res();
  5226   if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
  5227     n--;
  5228   return n;
  5231 size_t G1CollectedHeap::count_free_regions_list() {
  5232   size_t n = 0;
  5233   size_t o = 0;
  5234   ZF_mon->lock_without_safepoint_check();
  5235   HeapRegion* cur = _free_region_list;
  5236   while (cur != NULL) {
  5237     cur = cur->next_from_free_list();
  5238     n++;
  5240   size_t m = unclean_region_list_length();
  5241   ZF_mon->unlock();
  5242   return n + m;
  5245 bool G1CollectedHeap::should_set_young_locked() {
  5246   assert(heap_lock_held_for_gc(),
  5247               "the heap lock should already be held by or for this thread");
  5248   return  (g1_policy()->in_young_gc_mode() &&
  5249            g1_policy()->should_add_next_region_to_young_list());
  5252 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  5253   assert(heap_lock_held_for_gc(),
  5254               "the heap lock should already be held by or for this thread");
  5255   _young_list->push_region(hr);
  5256   g1_policy()->set_region_short_lived(hr);
  5259 class NoYoungRegionsClosure: public HeapRegionClosure {
  5260 private:
  5261   bool _success;
  5262 public:
  5263   NoYoungRegionsClosure() : _success(true) { }
  5264   bool doHeapRegion(HeapRegion* r) {
  5265     if (r->is_young()) {
  5266       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  5267                              r->bottom(), r->end());
  5268       _success = false;
  5270     return false;
  5272   bool success() { return _success; }
  5273 };
  5275 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
  5276                                              bool check_sample) {
  5277   bool ret = true;
  5279   ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
  5280   if (!ignore_scan_only_list) {
  5281     NoYoungRegionsClosure closure;
  5282     heap_region_iterate(&closure);
  5283     ret = ret && closure.success();
  5286   return ret;
  5289 void G1CollectedHeap::empty_young_list() {
  5290   assert(heap_lock_held_for_gc(),
  5291               "the heap lock should already be held by or for this thread");
  5292   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
  5294   _young_list->empty_list();
  5297 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  5298   bool no_allocs = true;
  5299   for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
  5300     HeapRegion* r = _gc_alloc_regions[ap];
  5301     no_allocs = r == NULL || r->saved_mark_at_top();
  5303   return no_allocs;
  5306 void G1CollectedHeap::retire_all_alloc_regions() {
  5307   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  5308     HeapRegion* r = _gc_alloc_regions[ap];
  5309     if (r != NULL) {
  5310       // Check for aliases.
  5311       bool has_processed_alias = false;
  5312       for (int i = 0; i < ap; ++i) {
  5313         if (_gc_alloc_regions[i] == r) {
  5314           has_processed_alias = true;
  5315           break;
  5318       if (!has_processed_alias) {
  5319         retire_alloc_region(r, false /* par */);
  5326 // Done at the start of full GC.
  5327 void G1CollectedHeap::tear_down_region_lists() {
  5328   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5329   while (pop_unclean_region_list_locked() != NULL) ;
  5330   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
   5331          "Postconditions of loop.");
  5332   while (pop_free_region_list_locked() != NULL) ;
  5333   assert(_free_region_list == NULL, "Postcondition of loop.");
  5334   if (_free_region_list_size != 0) {
  5335     gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
  5336     print();
  5338   assert(_free_region_list_size == 0, "Postconditions of loop.");
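// RegionResetter below is applied to the whole heap at the end of a full
// GC (see rebuild_region_lists()): non-empty regions have their unused
// tail filled and are marked zero-fill-allocated, while empty regions
// are routed to the unclean or free list according to their zero-fill
// state; the count of empty regions becomes the new _free_regions value.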
  5342 class RegionResetter: public HeapRegionClosure {
  5343   G1CollectedHeap* _g1;
  5344   int _n;
  5345 public:
  5346   RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  5347   bool doHeapRegion(HeapRegion* r) {
  5348     if (r->continuesHumongous()) return false;
  5349     if (r->top() > r->bottom()) {
  5350       if (r->top() < r->end()) {
  5351         Copy::fill_to_words(r->top(),
  5352                           pointer_delta(r->end(), r->top()));
  5354       r->set_zero_fill_allocated();
  5355     } else {
  5356       assert(r->is_empty(), "tautology");
  5357       _n++;
  5358       switch (r->zero_fill_state()) {
  5359         case HeapRegion::NotZeroFilled:
  5360         case HeapRegion::ZeroFilling:
  5361           _g1->put_region_on_unclean_list_locked(r);
  5362           break;
  5363         case HeapRegion::Allocated:
  5364           r->set_zero_fill_complete();
  5365           // no break; go on to put on free list.
  5366         case HeapRegion::ZeroFilled:
  5367           _g1->put_free_region_on_list_locked(r);
  5368           break;
  5371     return false;
  5374   int getFreeRegionCount() {return _n;}
  5375 };
  5377 // Done at the end of full GC.
  5378 void G1CollectedHeap::rebuild_region_lists() {
  5379   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  5380   // This needs to go at the end of the full GC.
  5381   RegionResetter rs;
  5382   heap_region_iterate(&rs);
  5383   _free_regions = rs.getFreeRegionCount();
  5384   // Tell the ZF thread it may have work to do.
  5385   if (should_zf()) ZF_mon->notify_all();
  5388 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  5389   G1CollectedHeap* _g1;
  5390   int _n;
  5391 public:
  5392   UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  5393   bool doHeapRegion(HeapRegion* r) {
  5394     if (r->continuesHumongous()) return false;
  5395     if (r->top() > r->bottom()) {
  5396       // There are assertions in "set_zero_fill_needed()" below that
  5397       // require top() == bottom(), so this is technically illegal.
  5398       // We'll skirt the law here, by making that true temporarily.
  5399       DEBUG_ONLY(HeapWord* save_top = r->top();
  5400                  r->set_top(r->bottom()));
  5401       r->set_zero_fill_needed();
  5402       DEBUG_ONLY(r->set_top(save_top));
  5404     return false;
  5406 };
  5408 // Done at the start of full GC.
  5409 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  5410   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   5411   // This needs to be done at the start of the full GC.
  5412   UsedRegionsNeedZeroFillSetter rs;
  5413   heap_region_iterate(&rs);
  5416 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  5417   _refine_cte_cl->set_concurrent(concurrent);
  5420 #ifndef PRODUCT
  5422 class PrintHeapRegionClosure: public HeapRegionClosure {
  5423 public:
  5424   bool doHeapRegion(HeapRegion *r) {
  5425     gclog_or_tty->print("Region: "PTR_FORMAT":", r);
  5426     if (r != NULL) {
  5427       if (r->is_on_free_list())
  5428         gclog_or_tty->print("Free ");
  5429       if (r->is_young())
  5430         gclog_or_tty->print("Young ");
  5431       if (r->isHumongous())
  5432         gclog_or_tty->print("Is Humongous ");
  5433       r->print();
  5435     return false;
  5437 };
  5439 class SortHeapRegionClosure : public HeapRegionClosure {
  5440   size_t young_regions,free_regions, unclean_regions;
  5441   size_t hum_regions, count;
  5442   size_t unaccounted, cur_unclean, cur_alloc;
  5443   size_t total_free;
  5444   HeapRegion* cur;
  5445 public:
  5446   SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
  5447     free_regions(0), unclean_regions(0),
  5448     hum_regions(0),
  5449     count(0), unaccounted(0),
  5450     cur_alloc(0), total_free(0)
  5451   {}
  5452   bool doHeapRegion(HeapRegion *r) {
  5453     count++;
  5454     if (r->is_on_free_list()) free_regions++;
  5455     else if (r->is_on_unclean_list()) unclean_regions++;
  5456     else if (r->isHumongous())  hum_regions++;
  5457     else if (r->is_young()) young_regions++;
  5458     else if (r == cur) cur_alloc++;
  5459     else unaccounted++;
  5460     return false;
  5462   void print() {
  5463     total_free = free_regions + unclean_regions;
  5464     gclog_or_tty->print("%d regions\n", count);
  5465     gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
  5466                         total_free, free_regions, unclean_regions);
  5467     gclog_or_tty->print("%d humongous %d young\n",
  5468                         hum_regions, young_regions);
  5469     gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
  5470     gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  5472 };
  5474 void G1CollectedHeap::print_region_counts() {
  5475   SortHeapRegionClosure sc(_cur_alloc_region);
  5476   PrintHeapRegionClosure cl;
  5477   heap_region_iterate(&cl);
  5478   heap_region_iterate(&sc);
  5479   sc.print();
  5480   print_region_accounting_info();
  5481 };
  5483 bool G1CollectedHeap::regions_accounted_for() {
  5484   // TODO: regions accounting for young/survivor/tenured
  5485   return true;
  5488 bool G1CollectedHeap::print_region_accounting_info() {
  5489   gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
  5490                          free_regions(),
  5491                          count_free_regions(), count_free_regions_list(),
  5492                          _free_region_list_size, _unclean_region_list.sz());
  5493   gclog_or_tty->print_cr("cur_alloc: %d.",
  5494                          (_cur_alloc_region == NULL ? 0 : 1));
  5495   gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
  5497   // TODO: check regions accounting for young/survivor/tenured
  5498   return true;
  5501 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5502   HeapRegion* hr = heap_region_containing(p);
  5503   if (hr == NULL) {
  5504     return is_in_permanent(p);
  5505   } else {
  5506     return hr->is_in(p);
  5509 #endif // PRODUCT
  5511 void G1CollectedHeap::g1_unimplemented() {
  5512   // Unimplemented();
  5516 // Local Variables: ***
  5517 // c-indentation-style: gnu ***
  5518 // End: ***
