src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author:      apetrusenko
date:        Mon, 31 Aug 2009 05:27:29 -0700
changeset:   1375  8624da129f0b
parent:      1371  e1fdf4fd34dc
child:       1376  8b46c4d82093
permissions: -rw-r--r--

6841313: G1: dirty cards of survivor regions in parallel
Reviewed-by: tonyp, iveresov

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_g1CollectedHeap.cpp.incl"

// turn it on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before / during
// / after the collection --- this is useful for debugging
#define SCAN_ONLY_VERBOSE 0

// CURRENT STATUS
// This file is under construction.  Search for "FIXME".

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
//   serialized by acquiring the HeapLock.  This happens in
//   mem_allocate_work, which all such allocation functions call.
//   (Note that this does not apply to TLAB allocation, which is not part
//   of this interface: it is done by clients of this interface.)

// Local to this file.
class RefineCardTableEntryClosure: public CardTableEntryClosure {
  SuspendibleThreadSet* _sts;
  G1RemSet* _g1rs;
  ConcurrentG1Refine* _cg1r;
  bool _concurrent;
public:
  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
                              G1RemSet* g1rs,
                              ConcurrentG1Refine* cg1r) :
    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
    if (_concurrent && _sts->should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
  void set_concurrent(bool b) { _concurrent = b; }
};
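
// Note: when running concurrently, do_card_ptr refines one card and then
// consults the suspendible thread set; returning false tells the caller
// that the closure stopped early and that it should yield (e.g. to a
// pending safepoint) before processing the rest of the buffer.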

class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  int _histo[256];
public:
  ClearLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    for (int i = 0; i < 256; i++) _histo[i] = 0;
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      unsigned char* ujb = (unsigned char*)card_ptr;
      int ind = (int)(*ujb);
      _histo[ind]++;
      *card_ptr = -1;
    }
    return true;
  }
  int calls() { return _calls; }
  void print_histo() {
    gclog_or_tty->print_cr("Card table value histogram:");
    for (int i = 0; i < 256; i++) {
      if (_histo[i] != 0) {
        gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
      }
    }
  }
};
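
// The histogram above is indexed by the raw card byte, so it records how
// many logged cards were in each card-table state when they were cleared.
// The -1 stored into each card is the card table's "clean" value
// (CardTableModRefBS::clean_card_val()); the 0 written by the redirty
// closures below is the "dirty" value.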

class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
  int _calls;
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
public:
  RedirtyLoggedCardTableEntryClosure() :
    _calls(0)
  {
    _g1h = G1CollectedHeap::heap();
    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
  }
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
      _calls++;
      *card_ptr = 0;
    }
    return true;
  }
  int calls() { return _calls; }
};

class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};

YoungList::YoungList(G1CollectedHeap* g1h)
  : _g1h(g1h), _head(NULL),
    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
    _length(0), _scan_only_length(0),
    _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
{
  guarantee( check_list_empty(false), "just making sure..." );
}

void YoungList::push_region(HeapRegion *hr) {
  assert(!hr->is_young(), "should not already be young");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_head);
  _head = hr;

  hr->set_young();
  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
  ++_length;
}

void YoungList::add_survivor_region(HeapRegion* hr) {
  assert(hr->is_survivor(), "should be flagged as survivor region");
  assert(hr->get_next_young_region() == NULL, "cause it should!");

  hr->set_next_young_region(_survivor_head);
  if (_survivor_head == NULL) {
    _survivor_tail = hr;
  }
  _survivor_head = hr;

  ++_survivor_length;
}

HeapRegion* YoungList::pop_region() {
  while (_head != NULL) {
    assert( length() > 0, "list should not be empty" );
    HeapRegion* ret = _head;
    _head = ret->get_next_young_region();
    ret->set_next_young_region(NULL);
    --_length;
    assert(ret->is_young(), "region should be very young");

    // Replace 'Survivor' region type with 'Young'. So the region will
    // be treated as a young region and will not be 'confused' with
    // newly created survivor regions.
    if (ret->is_survivor()) {
      ret->set_young();
    }

    if (!ret->is_scan_only()) {
      return ret;
    }

    // scan-only, we'll add it to the scan-only list
    if (_scan_only_tail == NULL) {
      guarantee( _scan_only_head == NULL, "invariant" );

      _scan_only_head = ret;
      _curr_scan_only = ret;
    } else {
      guarantee( _scan_only_head != NULL, "invariant" );
      _scan_only_tail->set_next_young_region(ret);
    }
    guarantee( ret->get_next_young_region() == NULL, "invariant" );
    _scan_only_tail = ret;

    // no need to be tagged as scan-only any more
    ret->set_young();

    ++_scan_only_length;
  }
  assert( length() == 0, "list should be empty" );
  return NULL;
}
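
// In other words, pop_region returns the next region that is young but not
// scan-only; any scan-only regions encountered at the head of the list are
// migrated to the scan-only list (maintaining its tail pointer) rather
// than returned.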

void YoungList::empty_list(HeapRegion* list) {
  while (list != NULL) {
    HeapRegion* next = list->get_next_young_region();
    list->set_next_young_region(NULL);
    list->uninstall_surv_rate_group();
    list->set_not_young();
    list = next;
  }
}

void YoungList::empty_list() {
  assert(check_list_well_formed(), "young list should be well formed");

  empty_list(_head);
  _head = NULL;
  _length = 0;

  empty_list(_scan_only_head);
  _scan_only_head = NULL;
  _scan_only_tail = NULL;
  _scan_only_length = 0;
  _curr_scan_only = NULL;

  empty_list(_survivor_head);
  _survivor_head = NULL;
  _survivor_tail = NULL;
  _survivor_length = 0;

  _last_sampled_rs_lengths = 0;

  assert(check_list_empty(false), "just making sure...");
}

bool YoungList::check_list_well_formed() {
  bool ret = true;

  size_t length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
    gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
                           length, _length);
  }

  bool scan_only_ret = true;
  length = 0;
  curr = _scan_only_head;
  last = NULL;
  while (curr != NULL) {
    if (!curr->is_young() || curr->is_scan_only()) {
      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
                             "incorrectly tagged (%d, %d)",
                             curr->bottom(), curr->end(),
                             curr->is_young(), curr->is_scan_only());
      scan_only_ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  scan_only_ret = scan_only_ret && (length == _scan_only_length);

  if ( (last != _scan_only_tail) ||
       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
    gclog_or_tty->print_cr("### _scan_only_tail is set incorrectly");
    scan_only_ret = false;
  }

  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
    scan_only_ret = false;
  }

  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
    gclog_or_tty->print_cr("###   list has %d entries, _scan_only_length is %d",
                           length, _scan_only_length);
  }

  return ret && scan_only_ret;
}

bool YoungList::check_list_empty(bool ignore_scan_only_list,
                                 bool check_sample) {
  bool ret = true;

  if (_length != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
                           _length);
    ret = false;
  }
  if (check_sample && _last_sampled_rs_lengths != 0) {
    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
    ret = false;
  }
  if (_head != NULL) {
    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
    ret = false;
  }
  if (!ret) {
    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
  }

  if (ignore_scan_only_list)
    return ret;

  bool scan_only_ret = true;
  if (_scan_only_length != 0) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
                           _scan_only_length);
    scan_only_ret = false;
  }
  if (_scan_only_head != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
    scan_only_ret = false;
  }
  if (_scan_only_tail != NULL) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
    scan_only_ret = false;
  }
  if (!scan_only_ret) {
    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
  }

  return ret && scan_only_ret;
}

void
YoungList::rs_length_sampling_init() {
  _sampled_rs_lengths = 0;
  _curr               = _head;
}

bool
YoungList::rs_length_sampling_more() {
  return _curr != NULL;
}

void
YoungList::rs_length_sampling_next() {
  assert( _curr != NULL, "invariant" );
  _sampled_rs_lengths += _curr->rem_set()->occupied();
  _curr = _curr->get_next_young_region();
  if (_curr == NULL) {
    _last_sampled_rs_lengths = _sampled_rs_lengths;
    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
  }
}
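
// The three methods above form an incremental walk over the young list
// that accumulates remembered-set sizes.  A sketch of the intended driver
// loop (young_list here is assumed to be this heap's YoungList):
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();
//   }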

void
YoungList::reset_auxilary_lists() {
  // We could have just "moved" the scan-only list to the young list.
  // However, the scan-only list is ordered according to the region
  // age in descending order, so, by moving one entry at a time, we
  // ensure that it is recreated in ascending order.

  guarantee( is_empty(), "young list should be empty" );
  assert(check_list_well_formed(), "young list should be well formed");

  // Add survivor regions to SurvRateGroup.
  _g1h->g1_policy()->note_start_adding_survivor_regions();
  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
  for (HeapRegion* curr = _survivor_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    _g1h->g1_policy()->set_region_survivors(curr);
  }
  _g1h->g1_policy()->note_stop_adding_survivor_regions();

  if (_survivor_head != NULL) {
    _head           = _survivor_head;
    _length         = _survivor_length + _scan_only_length;
    _survivor_tail->set_next_young_region(_scan_only_head);
  } else {
    _head           = _scan_only_head;
    _length         = _scan_only_length;
  }

  for (HeapRegion* curr = _scan_only_head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    curr->recalculate_age_in_surv_rate_group();
  }
  _scan_only_head   = NULL;
  _scan_only_tail   = NULL;
  _scan_only_length = 0;
  _curr_scan_only   = NULL;

  _survivor_head    = NULL;
  _survivor_tail    = NULL;
  _survivor_length  = 0;
  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);

  assert(check_list_well_formed(), "young list should be well formed");
}

void YoungList::print() {
  HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
  const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};

  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
      gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                             "age: %4d, y: %d, s-o: %d, surv: %d",
                             curr->bottom(), curr->end(),
                             curr->top(),
                             curr->prev_top_at_mark_start(),
                             curr->next_top_at_mark_start(),
                             curr->top_at_conc_mark_count(),
                             curr->age_in_surv_rate_group_cond(),
                             curr->is_young(),
                             curr->is_scan_only(),
                             curr->is_survivor());
      curr = curr->get_next_young_region();
    }
  }

  gclog_or_tty->print_cr("");
}
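
// The dirty cards region list below supports this changeset (6841313:
// dirty cards of survivor regions in parallel): regions whose cards must
// be dirtied (notably survivor regions) are pushed onto the list so that
// the parallel GC worker threads can pop regions and do the card dirtying
// concurrently, instead of one thread dirtying them all.  The list is a
// lock-free, singly-linked stack threaded through the regions themselves;
// the last element points to itself so that a NULL next-pointer can still
// mean "not on the list".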

void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
  // Claim the right to put the region on the dirty cards region list
  // by installing a self pointer.
  HeapRegion* next = hr->get_next_dirty_cards_region();
  if (next == NULL) {
    HeapRegion* res = (HeapRegion*)
      Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
                          NULL);
    if (res == NULL) {
      HeapRegion* head;
      do {
        // Put the region to the dirty cards region list.
        head = _dirty_cards_region_list;
        next = (HeapRegion*)
          Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
        if (next == head) {
          assert(hr->get_next_dirty_cards_region() == hr,
                 "hr->get_next_dirty_cards_region() != hr");
          if (next == NULL) {
            // The last region in the list points to itself.
            hr->set_next_dirty_cards_region(hr);
          } else {
            hr->set_next_dirty_cards_region(next);
          }
        }
      } while (next != head);
    }
  }
}

HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
{
  HeapRegion* head;
  HeapRegion* hr;
  do {
    head = _dirty_cards_region_list;
    if (head == NULL) {
      return NULL;
    }
    HeapRegion* new_head = head->get_next_dirty_cards_region();
    if (head == new_head) {
      // The last region.
      new_head = NULL;
    }
    hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
                                          head);
  } while (hr != head);
  assert(hr != NULL, "invariant");
  hr->set_next_dirty_cards_region(NULL);
  return hr;
}

void G1CollectedHeap::stop_conc_gc_threads() {
  _cg1r->stop();
  _czft->stop();
  _cmThread->stop();
}

void G1CollectedHeap::check_ct_logs_at_safepoint() {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();

  // Count the dirty cards at the start.
  CountNonCleanMemRegionClosure count1(this);
  ct_bs->mod_card_iterate(&count1);
  int orig_count = count1.n();

  // First clear the logged cards.
  ClearLoggedCardTableEntryClosure clear;
  dcqs.set_closure(&clear);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  clear.print_histo();

  // Now ensure that there are no dirty cards.
  CountNonCleanMemRegionClosure count2(this);
  ct_bs->mod_card_iterate(&count2);
  if (count2.n() != 0) {
    gclog_or_tty->print_cr("Card table has %d entries; %d originally",
                           count2.n(), orig_count);
  }
  guarantee(count2.n() == 0, "Card table should be clean.");

  RedirtyLoggedCardTableEntryClosure redirty;
  JavaThread::dirty_card_queue_set().set_closure(&redirty);
  dcqs.apply_closure_to_all_completed_buffers();
  dcqs.iterate_closure_all_threads(false);
  gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
                         clear.calls(), orig_count);
  guarantee(redirty.calls() == clear.calls(),
            "Or else mechanism is broken.");

  CountNonCleanMemRegionClosure count3(this);
  ct_bs->mod_card_iterate(&count3);
  if (count3.n() != orig_count) {
    gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
                           orig_count, count3.n());
    guarantee(count3.n() >= orig_count, "Should have restored them all.");
  }

  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
}
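
// The method above is a self-test of the card logging mechanism: it counts
// the dirty cards on the card table, clears every logged card (verifying
// via count2 that the table is then clean), redirties the same log
// entries, and finally checks that at least the original number of dirty
// cards has been restored.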

// Private class members.

G1CollectedHeap* G1CollectedHeap::_g1h;

// Private methods.

// Finds a HeapRegion that can be used to allocate a block of the given size.
HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
                                                 bool do_expand,
                                                 bool zero_filled) {
  ConcurrentZFThread::note_region_alloc();
  HeapRegion* res = alloc_free_region_from_lists(zero_filled);
  if (res == NULL && do_expand) {
    expand(word_size * HeapWordSize);
    res = alloc_free_region_from_lists(zero_filled);
    assert(res == NULL ||
           (!res->isHumongous() &&
            (!zero_filled ||
             res->zero_fill_state() == HeapRegion::Allocated)),
           "Alloc Regions must be zero filled (and non-H)");
  }
  if (res != NULL && res->is_empty()) _free_regions--;
  assert(res == NULL ||
         (!res->isHumongous() &&
          (!zero_filled ||
           res->zero_fill_state() == HeapRegion::Allocated)),
         "Non-young alloc Regions must be zero filled (and non-H)");

  if (G1PrintRegions) {
    if (res != NULL) {
      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                             "top "PTR_FORMAT,
                             res->hrs_index(), res->bottom(), res->end(), res->top());
    }
  }

  return res;
}

HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
                                                         size_t word_size,
                                                         bool zero_filled) {
  HeapRegion* alloc_region = NULL;
  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    alloc_region = newAllocRegion_work(word_size, true, zero_filled);
    if (purpose == GCAllocForSurvived && alloc_region != NULL) {
      alloc_region->set_survivor();
    }
    ++_gc_alloc_region_counts[purpose];
  } else {
    g1_policy()->note_alloc_region_limit_reached(purpose);
  }
  return alloc_region;
}

// If the allocation could fit into the free regions without expansion,
// try that.  Otherwise, if we can expand, do so.  Otherwise, if using
// expansion ("ex") regions might help, try with those given back.
HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
  assert(regions_accounted_for(), "Region leakage!");

  // We can't allocate H regions while cleanupComplete is running, since
  // some of the regions we find to be empty might not yet be added to the
  // unclean list.  (If we're already at a safepoint, this call is
  // unnecessary, not to mention wrong.)
  if (!SafepointSynchronize::is_at_safepoint())
    wait_for_cleanup_complete();

  size_t num_regions =
    round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;

  // Special case if < one region???

  // Remember the number of expansion regions available.
  size_t x_size = expansion_regions();

  HeapWord* res = NULL;
  bool eliminated_allocated_from_lists = false;

  // Can the allocation potentially fit in the free regions?
  if (free_regions() >= num_regions) {
    res = _hrs->obj_allocate(word_size);
  }
  if (res == NULL) {
    // Try expansion.
    size_t fs = _hrs->free_suffix();
    if (fs + x_size >= num_regions) {
      expand((num_regions - fs) * HeapRegion::GrainBytes);
      res = _hrs->obj_allocate(word_size);
      assert(res != NULL, "This should have worked.");
    } else {
      // Expansion won't help.  Are there enough free regions if we get rid
      // of reservations?
      size_t avail = free_regions();
      if (avail >= num_regions) {
        res = _hrs->obj_allocate(word_size);
        if (res != NULL) {
          remove_allocated_regions_from_lists();
          eliminated_allocated_from_lists = true;
        }
      }
    }
  }
  if (res != NULL) {
    // Increment by the number of regions allocated.
    // FIXME: Assumes regions all of size GrainBytes.
#ifndef PRODUCT
    mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
                                           HeapRegion::GrainWords));
#endif
    if (!eliminated_allocated_from_lists)
      remove_allocated_regions_from_lists();
    _summary_bytes_used += word_size * HeapWordSize;
    _free_regions -= num_regions;
    _num_humongous_regions += (int) num_regions;
  }
  assert(regions_accounted_for(), "Region Leakage");
  return res;
}
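
// A worked sizing example (assuming 1M regions on a 64-bit VM, so
// HeapRegion::GrainWords == 131072): word_size == 300000 rounds up to
// 393216 == 3 * GrainWords, so num_regions == 3 contiguous regions are
// required for the humongous object.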

HeapWord*
G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                         bool permit_collection_pause) {
  HeapWord* res = NULL;
  HeapRegion* allocated_young_region = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre condition of the call" );

  if (isHumongous(word_size)) {
    // Allocation of a humongous object can, in a sense, complete a
    // partial region, if the previous alloc was also humongous, and
    // caused the test below to succeed.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    res = humongousObjAllocate(word_size);
    assert(_cur_alloc_region == NULL
           || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

  } else {
    // We may have concurrent cleanup working at the time. Wait for it
    // to complete. In the future we would probably want to make the
    // concurrent cleanup truly concurrent by decoupling it from the
    // allocation.
    if (!SafepointSynchronize::is_at_safepoint())
      wait_for_cleanup_complete();
    // If we do a collection pause, this will be reset to a non-NULL
    // value.  If we don't, nulling here ensures that we allocate a new
    // region below.
    if (_cur_alloc_region != NULL) {
      // We're finished with the _cur_alloc_region.
      _summary_bytes_used += _cur_alloc_region->used();
      _cur_alloc_region = NULL;
    }
    assert(_cur_alloc_region == NULL, "Invariant.");
    // Completion of a heap region is perhaps a good point at which to do
    // a collection pause.
    if (permit_collection_pause)
      do_collection_pause_if_appropriate(word_size);
    // Make sure we have an allocation region available.
    if (_cur_alloc_region == NULL) {
      if (!SafepointSynchronize::is_at_safepoint())
        wait_for_cleanup_complete();
      bool next_is_young = should_set_young_locked();
      // If the next region is not young, make sure it's zero-filled.
      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
      if (_cur_alloc_region != NULL) {
        _summary_bytes_used -= _cur_alloc_region->used();
        if (next_is_young) {
          set_region_short_lived_locked(_cur_alloc_region);
          allocated_young_region = _cur_alloc_region;
        }
      }
    }
    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
           "Prevent a regression of this bug.");

    // Now retry the allocation.
    if (_cur_alloc_region != NULL) {
      res = _cur_alloc_region->allocate(word_size);
    }
  }

  // NOTE: fails frequently in PRT
  assert(regions_accounted_for(), "Region leakage!");

  if (res != NULL) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      assert( permit_collection_pause, "invariant" );
      assert( Heap_lock->owned_by_self(), "invariant" );
      Heap_lock->unlock();
    }

    if (allocated_young_region != NULL) {
      HeapRegion* hr = allocated_young_region;
      HeapWord* bottom = hr->bottom();
      HeapWord* end = hr->end();
      MemRegion mr(bottom, end);
      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
    }
  }

  assert( SafepointSynchronize::is_at_safepoint() ||
          (res == NULL && Heap_lock->owned_by_self()) ||
          (res != NULL && !Heap_lock->owned_by_self()),
          "post condition of the call" );

  return res;
}
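
// Note: the whole [bottom, end) range of a newly allocated young region is
// dirtied on the card table above.  A plausible reading (not spelled out
// here): writes into young regions never need remembered-set updates, so
// pre-dirtied cards let the write barrier's card test filter such writes
// out instead of enqueueing them.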

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool   is_noref,
                              bool   is_tlab,
                              bool* gc_overhead_limit_was_exceeded) {
  debug_only(check_for_valid_allocation_state());
  assert(no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    int gc_count_before;
    {
      Heap_lock->lock();
      result = attempt_allocation(word_size);
      if (result != NULL) {
        // attempt_allocation should have unlocked the heap lock
        assert(is_in(result), "result not in heap");
        return result;
      }
      // Read the gc count while the heap lock is held.
      gc_count_before = SharedHeap::heap()->total_collections();
      Heap_lock->unlock();
    }

    // Create the garbage collection operation...
    VM_G1CollectForAllocation op(word_size,
                                 gc_count_before);

    // ...and get the VM thread to execute it.
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::mem_allocate_work retries %d times",
              try_count);
    }
  }
}

void G1CollectedHeap::abandon_cur_alloc_region() {
  if (_cur_alloc_region != NULL) {
    // We're finished with the _cur_alloc_region.
    if (_cur_alloc_region->is_empty()) {
      _free_regions++;
      free_region(_cur_alloc_region);
    } else {
      _summary_bytes_used += _cur_alloc_region->used();
    }
    _cur_alloc_region = NULL;
  }
}

void G1CollectedHeap::abandon_gc_alloc_regions() {
  // first, make sure that the GC alloc region list is empty (it should!)
  assert(_gc_alloc_region_list == NULL, "invariant");
  release_gc_alloc_regions(true /* totally */);
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    r->reset_gc_time_stamp();
    if (r->continuesHumongous())
      return false;
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs != NULL) hrrs->clear();
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
    return false;
  }
};

class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    if (r->used_region().word_size() != 0) {
      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
    }
    return false;
  }
};

class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap*   _g1h;
  UpdateRSOopClosure _cl;
  int                _worker_i;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    _worker_i(worker_i),
    _g1h(g1)
  { }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
public:
  ParRebuildRSTask(G1CollectedHeap* g1)
    : AbstractGangTask("ParRebuildRSTask"),
      _g1(g1)
  { }

  void work(int i) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
                                         HeapRegion::RebuildRSClaimValue);
  }
};
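
// Each worker claims heap-region chunks tagged with RebuildRSClaimValue,
// so the regions are partitioned among the parallel threads without
// locking; do_collection() below asserts on the claim values before and
// after running the task.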

void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                    size_t word_size) {
  ResourceMark rm;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  if (full && DisableExplicitGC) {
    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

  if (GC_locker::is_active()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  {
    IsGCActiveMark x;

    // Timing
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);

    double start = os::elapsedTime();
    g1_policy()->record_full_collection_start();

    gc_prologue(true);
    increment_total_collections(true /* full gc */);

    size_t g1h_prev_used = used();
    assert(used() == recalculate_used(), "Should be equal");

    if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      prepare_for_verify();
      gclog_or_tty->print(" VerifyBeforeGC:");
      Universe::verify(true);
    }
    assert(regions_accounted_for(), "Region leakage!");

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    // We want to discover references, but not process them yet.
    // This mode is disabled in
    // instanceRefKlass::process_discovered_references if the
    // generation does some collection work, or
    // instanceRefKlass::enqueue_discovered_references if the
    // generation returns without doing any work.
    ref_processor()->disable_discovery();
    ref_processor()->abandon_partial_discovery();
    ref_processor()->verify_no_references_recorded();

    // Abandon current iterations of concurrent marking and concurrent
    // refinement, if any are in progress.
    concurrent_mark()->abort();

    // Make sure we'll choose a new allocation region afterwards.
    abandon_cur_alloc_region();
    abandon_gc_alloc_regions();
    assert(_cur_alloc_region == NULL, "Invariant.");
    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
    tear_down_region_lists();
    set_used_regions_to_need_zero_fill();
    if (g1_policy()->in_young_gc_mode()) {
      empty_young_list();
      g1_policy()->set_full_young_gcs(true);
    }

    // Temporarily make reference _discovery_ single threaded (non-MT).
    ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);

    // Temporarily make refs discovery atomic
    ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);

    // Temporarily clear _is_alive_non_header
    ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_soft_refs);

    // Do collection work
    {
      HandleMark hm;  // Discard invalid handles created during gc
      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
    }
    // Because freeing humongous regions may have added some unclean
    // regions, it is necessary to tear down again before rebuilding.
    tear_down_region_lists();
    rebuild_region_lists();

    _summary_bytes_used = recalculate_used();

    ref_processor()->enqueue_discovered_references();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      gclog_or_tty->print(" VerifyAfterGC:");
      prepare_for_verify();
      Universe::verify(false);
    }

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    reset_gc_time_stamp();
    // Since everything potentially moved, we will clear all remembered
    // sets, and clear all cards.  Later we will rebuild remembered
    // sets. We will also reset the GC time stamps of the regions.
    PostMCRemSetClearClosure rs_clear(mr_bs());
    heap_region_iterate(&rs_clear);

    // Resize the heap if necessary.
    resize_if_necessary_after_full_collection(full ? 0 : word_size);

    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }

    // Rebuild remembered sets of all regions.
    if (ParallelGCThreads > 0) {
      ParRebuildRSTask rebuild_rs_task(this);
      assert(check_heap_region_claim_values(
             HeapRegion::InitialClaimValue), "sanity check");
      set_par_threads(workers()->total_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
      assert(check_heap_region_claim_values(
             HeapRegion::RebuildRSClaimValue), "sanity check");
      reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (PrintGC) {
      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

    double end = os::elapsedTime();
    g1_policy()->record_full_collection_end();

#ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
#endif

    gc_epilogue(true);

    // Discard all remembered set (rset) updates.
    JavaThread::dirty_card_queue_set().abandon_logs();
    assert(!G1DeferredRSUpdate
           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
    assert(regions_accounted_for(), "Region leakage!");
  }

  if (g1_policy()->in_young_gc_mode()) {
    _young_list->reset_sampled_info();
    assert( check_young_list_empty(false, false),
            "young list should be empty at this point");
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
}

void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_collection(true, clear_all_soft_refs, 0);
}

// This code is mostly copied from TenuredGeneration.
void
G1CollectedHeap::
resize_if_necessary_after_full_collection(size_t word_size) {
  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");

  // Include the current allocation, if any, and bytes that will be
  // pre-allocated to support collections, as "used".
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();
  const size_t free_after_gc = capacity_after_gc - used_after_gc;

  // We don't have floating point command-line arguments
  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  const double minimum_used_percentage = 1.0 - maximum_free_percentage;

  size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
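
  // A worked example (assuming the default MinHeapFreeRatio == 40 and
  // MaxHeapFreeRatio == 70): with 600M used after the collection,
  //   minimum_desired_capacity = 600M / (1.0 - 0.40) = 1000M
  //   maximum_desired_capacity = 600M / (1.0 - 0.70) = 2000M
  // so the committed size is grown if it is below 1000M and shrunk if it
  // is above 2000M.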

  // Don't shrink less than the initial size.
  minimum_desired_capacity =
    MAX2(minimum_desired_capacity,
         collector_policy()->initial_heap_byte_size());
  maximum_desired_capacity =
    MAX2(maximum_desired_capacity,
         collector_policy()->initial_heap_byte_size());

  // We are failing here because minimum_desired_capacity is
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const double free_percentage = ((double)free_after_gc) / capacity();
    gclog_or_tty->print_cr("Computing new size after full GC ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f",
                           minimum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  maximum_free_percentage: %6.2f",
                           maximum_free_percentage);
    gclog_or_tty->print_cr("  "
                           "  capacity: %6.1fK"
                           "  minimum_desired_capacity: %6.1fK"
                           "  maximum_desired_capacity: %6.1fK",
                           capacity() / (double) K,
                           minimum_desired_capacity / (double) K,
                           maximum_desired_capacity / (double) K);
    gclog_or_tty->print_cr("  "
                           "   free_after_gc   : %6.1fK"
                           "   used_after_gc   : %6.1fK",
                           free_after_gc / (double) K,
                           used_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                           "   free_percentage: %6.2f",
                           free_percentage);
  }
  if (capacity() < minimum_desired_capacity) {
    // Don't expand unless it's significant
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    expand(expand_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                             "  minimum_desired_capacity: %6.1fK"
                             "  expand_bytes: %6.1fK",
                             minimum_desired_capacity / (double) K,
                             expand_bytes / (double) K);
    }

    // No expansion, now see if we want to shrink
  } else if (capacity() > maximum_desired_capacity) {
    // Capacity too large, compute shrinking size
    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    shrink(shrink_bytes);
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  shrinking:"
                             "  initSize: %.1fK"
                             "  maximum_desired_capacity: %.1fK",
                             collector_policy()->initial_heap_byte_size() / (double) K,
                             maximum_desired_capacity / (double) K);
      gclog_or_tty->print_cr("  "
                             "  shrink_bytes: %.1fK",
                             shrink_bytes / (double) K);
    }
  }
}

HeapWord*
G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  HeapWord* result = NULL;

  // In a G1 heap, we're supposed to keep allocation from failing by
  // incremental pauses.  Therefore, at least for now, we'll favor
  // expansion over collection.  (This might change in the future if we can
  // do something smarter than full collection to satisfy a failed alloc.)

  result = expand_and_allocate(word_size);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // OK, I guess we have to try collection.

  do_collection(false, false, word_size);

  result = attempt_allocation(word_size, /*permit_collection_pause*/false);

  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // Try collecting soft references.
  do_collection(false, true, word_size);
  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  if (result != NULL) {
    assert(is_in(result), "result not in heap");
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Attempts to expand the heap sufficiently to support an allocation of
// the given "word_size".  If successful, performs the allocation and
// returns the address of the allocated block, or else returns "NULL".
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  size_t expand_bytes = word_size * HeapWordSize;
  if (expand_bytes < MinHeapDeltaBytes) {
    expand_bytes = MinHeapDeltaBytes;
  }
  expand(expand_bytes);
  assert(regions_accounted_for(), "Region leakage!");
  HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  return result;
}

size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  size_t pre_used = 0;
  size_t cleared_h_regions = 0;
  size_t freed_regions = 0;
  UncleanRegionList local_list;
  free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
                                    freed_regions, &local_list);

  finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
                          &local_list);
  return pre_used;
}

void
G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
                                                   size_t& pre_used,
                                                   size_t& cleared_h,
                                                   size_t& freed_regions,
                                                   UncleanRegionList* list,
                                                   bool par) {
  assert(!hr->continuesHumongous(), "should have filtered these out");
  size_t res = 0;
  if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
      !hr->is_young()) {
    if (G1PolicyVerbose > 0)
      gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT" ("SIZE_FORMAT" bytes) during cleanup",
                             hr, hr->used());
    free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
  }
}

// FIXME: both this and shrink could probably be more efficient by
// doing one "VirtualSpace::expand_by" call rather than several.
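// Expansion proceeds one region (GrainBytes) at a time: each iteration of
// the loop below commits one more region's worth of storage, wraps it in
// a new HeapRegion, and puts that region on the free list (if the memory
// is known to be zeroed) or on the unclean list (if it still needs
// zero-filling).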
void G1CollectedHeap::expand(size_t expand_bytes) {
  size_t old_mem_size = _g1_storage.committed_size();
  // We expand by a minimum of 1K.
  expand_bytes = MAX2(expand_bytes, (size_t)K);
  size_t aligned_expand_bytes =
    ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                       HeapRegion::GrainBytes);
  expand_bytes = aligned_expand_bytes;
  while (expand_bytes > 0) {
    HeapWord* base = (HeapWord*)_g1_storage.high();
    // Commit more storage.
    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
    if (!successful) {
      expand_bytes = 0;
    } else {
      expand_bytes -= HeapRegion::GrainBytes;
      // Expand the committed region.
      HeapWord* high = (HeapWord*) _g1_storage.high();
      _g1_committed.set_end(high);
      // Create a new HeapRegion.
      MemRegion mr(base, high);
      bool is_zeroed = !_g1_max_committed.contains(base);
      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);

      // Now update max_committed if necessary.
      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));

      // Add it to the HeapRegionSeq.
      _hrs->insert(hr);
      // Set the zero-fill state, according to whether it's already
      // zeroed.
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        if (is_zeroed) {
          hr->set_zero_fill_complete();
          put_free_region_on_list_locked(hr);
        } else {
          hr->set_zero_fill_needed();
          put_region_on_unclean_list_locked(hr);
        }
      }
      _free_regions++;
      // And we used up an expansion region to create it.
      _expansion_regions--;
      // Tell the cardtable about it.
      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
      // And the offset table as well.
      _bot_shared->resize(_g1_committed.word_size());
    }
  }
  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_expand_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
{
  size_t old_mem_size = _g1_storage.committed_size();
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                         HeapRegion::GrainBytes);
  size_t num_regions_deleted = 0;
  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);

  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  if (mr.byte_size() > 0)
    _g1_storage.shrink_by(mr.byte_size());
  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");

  _g1_committed.set_end(mr.start());
  _free_regions -= num_regions_deleted;
  _expansion_regions += num_regions_deleted;

  // Tell the cardtable about it.
  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);

  // And the offset table as well.
  _bot_shared->resize(_g1_committed.word_size());

  HeapRegionRemSet::shrink_heap(n_regions());

  if (Verbose && PrintGC) {
    size_t new_mem_size = _g1_storage.committed_size();
    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
                           old_mem_size/K, aligned_shrink_bytes/K,
                           new_mem_size/K);
  }
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  release_gc_alloc_regions(true /* totally */);
  tear_down_region_lists();  // We will rebuild them in a moment.
  shrink_helper(shrink_bytes);
  rebuild_region_lists();
}

// Public methods.

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
  1369 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1370   SharedHeap(policy_),
  1371   _g1_policy(policy_),
  1372   _ref_processor(NULL),
  1373   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1374   _bot_shared(NULL),
  1375   _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  1376   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1377   _evac_failure_scan_stack(NULL) ,
  1378   _mark_in_progress(false),
  1379   _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  1380   _cur_alloc_region(NULL),
  1381   _refine_cte_cl(NULL),
  1382   _free_region_list(NULL), _free_region_list_size(0),
  1383   _free_regions(0),
  1384   _full_collection(false),
  1385   _unclean_region_list(),
  1386   _unclean_regions_coming(false),
  1387   _young_list(new YoungList(this)),
  1388   _gc_time_stamp(0),
  1389   _surviving_young_words(NULL),
  1390   _in_cset_fast_test(NULL),
  1391   _in_cset_fast_test_base(NULL),
  1392   _dirty_cards_region_list(NULL) {
  1393   _g1h = this; // To catch bugs.
  1394   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1395     vm_exit_during_initialization("Failed necessary allocation.");
  1397   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1398   _task_queues = new RefToScanQueueSet(n_queues);
  1400   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1401   assert(n_rem_sets > 0, "Invariant.");
  1403   HeapRegionRemSetIterator** iter_arr =
  1404     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  1405   for (int i = 0; i < n_queues; i++) {
  1406     iter_arr[i] = new HeapRegionRemSetIterator();
  1408   _rem_set_iterator = iter_arr;
  1410   for (int i = 0; i < n_queues; i++) {
  1411     RefToScanQueue* q = new RefToScanQueue();
  1412     q->initialize();
  1413     _task_queues->register_queue(i, q);
  1416   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  1417     _gc_alloc_regions[ap]          = NULL;
  1418     _gc_alloc_region_counts[ap]    = 0;
  1419     _retained_gc_alloc_regions[ap] = NULL;
  1420     // by default, we do not retain a GC alloc region for each ap;
  1421     // we'll override this, when appropriate, below
  1422     _retain_gc_alloc_region[ap]    = false;
  1423   }
  1425   // We will try to remember the last half-full tenured region we
  1426   // allocated to at the end of a collection so that we can re-use it
  1427   // during the next collection.
  1428   _retain_gc_alloc_region[GCAllocForTenured]  = true;
  1430   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1431 }
  1433 jint G1CollectedHeap::initialize() {
  1434   os::enable_vtime();
  1436   // Necessary to satisfy locking discipline assertions.
  1438   MutexLocker x(Heap_lock);
  1440   // While there are no constraints in the GC code that HeapWordSize
  1441   // be any particular value, there are multiple other areas in the
  1442   // system which believe this to be true (e.g. oop->object_size in some
  1443   // cases incorrectly returns the size in wordSize units rather than
  1444   // HeapWordSize).
  1445   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1447   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1448   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1450   // Ensure that the sizes are properly aligned.
  1451   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1452   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1454   // We allocate this in any case, but it does no work if the command-line
  1455   // parameter is off.
  1456   _cg1r = new ConcurrentG1Refine();
  1458   // Reserve the maximum.
  1459   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1460   // Includes the perm-gen.
  1462   const size_t total_reserved = max_byte_size + pgs->max_size();
  1463   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  1465   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
  1466                         HeapRegion::GrainBytes,
  1467                         false /*ism*/, addr);
  1469   if (UseCompressedOops) {
  1470     if (addr != NULL && !heap_rs.is_reserved()) {
  1471       // Failed to reserve at specified address - the requested memory
  1472       // region is taken already, for example, by 'java' launcher.
  1473       // Try again to reserve the heap higher.
  1474       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  1475       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  1476                              false /*ism*/, addr);
  1477       if (addr != NULL && !heap_rs0.is_reserved()) {
  1478         // Failed to reserve at specified address again - give up.
  1479         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  1480         assert(addr == NULL, "");
  1481         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  1482                                false /*ism*/, addr);
  1483         heap_rs = heap_rs1;
  1484       } else {
  1485         heap_rs = heap_rs0;
  1486       }
  1487     }
  1488   }
  1490   if (!heap_rs.is_reserved()) {
  1491     vm_exit_during_initialization("Could not reserve enough space for object heap");
  1492     return JNI_ENOMEM;
  1493   }
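// A minimal sketch of the fallback just performed (assumes the standard
// compressed-oops modes; not code from this file):
//
//   1. UnscaledNarrowOop  - heap fits under 4GB, a narrow oop is the address;
//   2. ZeroBasedNarrowOop - heap fits under 32GB, decode is (oop << shift);
//   3. HeapBasedNarrowOop - arbitrary placement, decode is base + (oop << shift).
//
// Each step asks Universe::preferred_heap_base() for a base suited to the
// next-cheapest mode and retries the ReservedSpace at that address, giving up
// only when even heap-based reservation fails.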
  1495   // It is important to do this in a way such that concurrent readers can't
  1496   // temporarily think something is in the heap.  (I've actually seen this
  1497   // happen in asserts: DLD.)
  1498   _reserved.set_word_size(0);
  1499   _reserved.set_start((HeapWord*)heap_rs.base());
  1500   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  1502   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  1504   _num_humongous_regions = 0;
  1506   // Create the gen rem set (and barrier set) for the entire reserved region.
  1507   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  1508   set_barrier_set(rem_set()->bs());
  1509   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  1510     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  1511   } else {
  1512     vm_exit_during_initialization("G1 requires a mod ref bs.");
  1513     return JNI_ENOMEM;
  1514   }
  1516   // Also create a G1 rem set.
  1517   if (G1UseHRIntoRS) {
  1518     if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  1519       _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
  1520     } else {
  1521       vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  1522       return JNI_ENOMEM;
  1523     }
  1524   } else {
  1525     _g1_rem_set = new StupidG1RemSet(this);
  1526   }
  1528   // Carve out the G1 part of the heap.
  1530   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  1531   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  1532                            g1_rs.size()/HeapWordSize);
  1533   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  1535   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  1537   _g1_storage.initialize(g1_rs, 0);
  1538   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  1539   _g1_max_committed = _g1_committed;
  1540   _hrs = new HeapRegionSeq(_expansion_regions);
  1541   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  1542   guarantee(_cur_alloc_region == NULL, "from constructor");
  1544   // 6843694 - ensure that the maximum region index can fit
  1545   // in the remembered set structures.
  1546   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  1547   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  1549   const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
  1550   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  1551   guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
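// Worked example (a hedged sketch: assumes this era's 1M default region size
// and HotSpot's 512-byte cards, i.e. a card_shift of 9): cards_per_region is
// 1M >> 9 == 2048, and if CardIdx_t is a 32-bit type the bound is 2^31 - 1,
// so the guarantee above holds with a very large margin.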
  1553   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  1554                                              heap_word_size(init_byte_size));
  1556   _g1h = this;
  1558   // Create the ConcurrentMark data structure and thread.
  1559   // (Must do this late, so that "max_regions" is defined.)
  1560   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  1561   _cmThread = _cm->cmThread();
  1563   // ...and the concurrent zero-fill thread, if necessary.
  1564   if (G1ConcZeroFill) {
  1565     _czft = new ConcurrentZFThread();
  1566   }
  1568   // Initialize the from_card cache structure of HeapRegionRemSet.
  1569   HeapRegionRemSet::init_heap(max_regions());
  1571   // Now expand into the initial heap size.
  1572   expand(init_byte_size);
  1574   // Perform any initialization actions delegated to the policy.
  1575   g1_policy()->init();
  1577   g1_policy()->note_start_of_mark_thread();
  1579   _refine_cte_cl =
  1580     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  1581                                     g1_rem_set(),
  1582                                     concurrent_g1_refine());
  1583   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  1585   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  1586                                                SATB_Q_FL_lock,
  1587                                                0,
  1588                                                Shared_SATB_Q_lock);
  1590   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1591                                                 DirtyCardQ_FL_lock,
  1592                                                 G1UpdateBufferQueueMaxLength,
  1593                                                 Shared_DirtyCardQ_lock);
  1595   if (G1DeferredRSUpdate) {
  1596     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1597                                       DirtyCardQ_FL_lock,
  1598                                       0,
  1599                                       Shared_DirtyCardQ_lock,
  1600                                       &JavaThread::dirty_card_queue_set());
  1601   }
  1602   // In case we're keeping closure specialization stats, initialize those
  1603   // counts and that mechanism.
  1604   SpecializationStats::clear();
  1606   _gc_alloc_region_list = NULL;
  1608   // Do later initialization work for concurrent refinement.
  1609   _cg1r->init();
  1611   return JNI_OK;
  1612 }
  1614 void G1CollectedHeap::ref_processing_init() {
  1615   SharedHeap::ref_processing_init();
  1616   MemRegion mr = reserved_region();
  1617   _ref_processor = ReferenceProcessor::create_ref_processor(
  1618                                          mr,    // span
  1619                                          false, // Reference discovery is not atomic
  1620                                                 // (though it shouldn't matter here.)
  1621                                          true,  // mt_discovery
  1622                                          NULL,  // is alive closure: need to fill this in for efficiency
  1623                                          ParallelGCThreads,
  1624                                          ParallelRefProcEnabled,
  1625                                          true); // Setting next fields of discovered
  1626                                                 // lists requires a barrier.
  1627 }
  1629 size_t G1CollectedHeap::capacity() const {
  1630   return _g1_committed.byte_size();
  1631 }
  1633 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
  1634                                                  int worker_i) {
  1635   // Clean cards in the hot card cache
  1636   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set());
  1638   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1639   int n_completed_buffers = 0;
  1640   while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
  1641     n_completed_buffers++;
  1642   }
  1643   g1_policy()->record_update_rs_processed_buffers(worker_i,
  1644                                                   (double) n_completed_buffers);
  1645   dcqs.clear_n_completed_buffers();
  1646   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  1647 }
  1650 // Computes the sum of the storage used by the various regions.
  1652 size_t G1CollectedHeap::used() const {
  1653   assert(Heap_lock->owner() != NULL,
  1654          "Should be owned on this thread's behalf.");
  1655   size_t result = _summary_bytes_used;
  1656   // Read only once in case it is set to NULL concurrently
  1657   HeapRegion* hr = _cur_alloc_region;
  1658   if (hr != NULL)
  1659     result += hr->used();
  1660   return result;
  1661 }
  1663 size_t G1CollectedHeap::used_unlocked() const {
  1664   size_t result = _summary_bytes_used;
  1665   return result;
  1666 }
  1668 class SumUsedClosure: public HeapRegionClosure {
  1669   size_t _used;
  1670 public:
  1671   SumUsedClosure() : _used(0) {}
  1672   bool doHeapRegion(HeapRegion* r) {
  1673     if (!r->continuesHumongous()) {
  1674       _used += r->used();
  1675     }
  1676     return false;
  1677   }
  1678   size_t result() { return _used; }
  1679 };
  1681 size_t G1CollectedHeap::recalculate_used() const {
  1682   SumUsedClosure blk;
  1683   _hrs->iterate(&blk);
  1684   return blk.result();
  1685 }
  1687 #ifndef PRODUCT
  1688 class SumUsedRegionsClosure: public HeapRegionClosure {
  1689   size_t _num;
  1690 public:
  1691   SumUsedRegionsClosure() : _num(0) {}
  1692   bool doHeapRegion(HeapRegion* r) {
  1693     if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
  1694       _num += 1;
  1695     }
  1696     return false;
  1697   }
  1698   size_t result() { return _num; }
  1699 };
  1701 size_t G1CollectedHeap::recalculate_used_regions() const {
  1702   SumUsedRegionsClosure blk;
  1703   _hrs->iterate(&blk);
  1704   return blk.result();
  1705 }
  1706 #endif // PRODUCT
  1708 size_t G1CollectedHeap::unsafe_max_alloc() {
  1709   if (_free_regions > 0) return HeapRegion::GrainBytes;
  1710   // otherwise, is there space in the current allocation region?
  1712   // We need to store the current allocation region in a local variable
  1713   // here. The problem is that this method doesn't take any locks and
  1714   // there may be other threads which overwrite the current allocation
  1715   // region field. attempt_allocation(), for example, sets it to NULL
  1716   // and this can happen *after* the NULL check here but before the call
  1717   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  1718   // to be a problem in the optimized build, since the two loads of the
  1719   // current allocation region field are optimized away.
  1720   HeapRegion* car = _cur_alloc_region;
  1722   // FIXME: should iterate over all regions?
  1723   if (car == NULL) {
  1724     return 0;
  1725   }
  1726   return car->free();
  1727 }
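// The read-once idiom used above, shown in isolation (illustrative sketch):
//
//   HeapRegion* car = _cur_alloc_region;     // single racy load into a local
//   return (car == NULL) ? 0 : car->free();  // both uses see the same value
//
// Re-reading the field instead of the local could observe a concurrent
// writer's NULL after the check passed - the SIGSEGV scenario the comment
// above describes.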
  1729 void G1CollectedHeap::collect(GCCause::Cause cause) {
  1730   // The caller doesn't have the Heap_lock
  1731   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  1732   MutexLocker ml(Heap_lock);
  1733   collect_locked(cause);
  1734 }
  1736 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  1737   assert(Thread::current()->is_VM_thread(), "Precondition#1");
  1738   assert(Heap_lock->is_locked(), "Precondition#2");
  1739   GCCauseSetter gcs(this, cause);
  1740   switch (cause) {
  1741     case GCCause::_heap_inspection:
  1742     case GCCause::_heap_dump: {
  1743       HandleMark hm;
  1744       do_full_collection(false);         // don't clear all soft refs
  1745       break;
  1746     }
  1747     default: // XXX FIX ME
  1748       ShouldNotReachHere(); // Unexpected use of this function
  1749   }
  1750 }
  1753 void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
  1754   // Don't want to do a GC until cleanup is completed.
  1755   wait_for_cleanup_complete();
  1757   // Read the GC count while holding the Heap_lock
  1758   int gc_count_before = SharedHeap::heap()->total_collections();
  1759   {
  1760     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
  1761     VM_G1CollectFull op(gc_count_before, cause);
  1762     VMThread::execute(&op);
  1763   }
  1764 }
  1766 bool G1CollectedHeap::is_in(const void* p) const {
  1767   if (_g1_committed.contains(p)) {
  1768     HeapRegion* hr = _hrs->addr_to_region(p);
  1769     return hr->is_in(p);
  1770   } else {
  1771     return _perm_gen->as_gen()->is_in(p);
  1772   }
  1773 }
  1775 // Iteration functions.
  1777 // Iterates an OopClosure over all ref-containing fields of objects
  1778 // within a HeapRegion.
  1780 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  1781   MemRegion _mr;
  1782   OopClosure* _cl;
  1783 public:
  1784   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  1785     : _mr(mr), _cl(cl) {}
  1786   bool doHeapRegion(HeapRegion* r) {
  1787     if (! r->continuesHumongous()) {
  1788       r->oop_iterate(_cl);
  1789     }
  1790     return false;
  1791   }
  1792 };
  1794 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  1795   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  1796   _hrs->iterate(&blk);
  1797   if (do_perm) {
  1798     perm_gen()->oop_iterate(cl);
  1799   }
  1800 }
  1802 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  1803   IterateOopClosureRegionClosure blk(mr, cl);
  1804   _hrs->iterate(&blk);
  1805   if (do_perm) {
  1806     perm_gen()->oop_iterate(cl);
  1807   }
  1808 }
  1810 // Iterates an ObjectClosure over all objects within a HeapRegion.
  1812 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  1813   ObjectClosure* _cl;
  1814 public:
  1815   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  1816   bool doHeapRegion(HeapRegion* r) {
  1817     if (! r->continuesHumongous()) {
  1818       r->object_iterate(_cl);
  1819     }
  1820     return false;
  1821   }
  1822 };
  1824 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  1825   IterateObjectClosureRegionClosure blk(cl);
  1826   _hrs->iterate(&blk);
  1827   if (do_perm) {
  1828     perm_gen()->object_iterate(cl);
  1829   }
  1830 }
  1832 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  1833   // FIXME: is this right?
  1834   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  1835 }
  1837 // Calls a SpaceClosure on a HeapRegion.
  1839 class SpaceClosureRegionClosure: public HeapRegionClosure {
  1840   SpaceClosure* _cl;
  1841 public:
  1842   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  1843   bool doHeapRegion(HeapRegion* r) {
  1844     _cl->do_space(r);
  1845     return false;
  1846   }
  1847 };
  1849 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  1850   SpaceClosureRegionClosure blk(cl);
  1851   _hrs->iterate(&blk);
  1852 }
  1854 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  1855   _hrs->iterate(cl);
  1856 }
  1858 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  1859                                                HeapRegionClosure* cl) {
  1860   _hrs->iterate_from(r, cl);
  1861 }
  1863 void
  1864 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  1865   _hrs->iterate_from(idx, cl);
  1866 }
  1868 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
  1870 void
  1871 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  1872                                                  int worker,
  1873                                                  jint claim_value) {
  1874   const size_t regions = n_regions();
  1875   const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
  1876   // try to spread out the starting points of the workers
  1877   const size_t start_index = regions / worker_num * (size_t) worker;
  1879   // each worker will actually look at all regions
  1880   for (size_t count = 0; count < regions; ++count) {
  1881     const size_t index = (start_index + count) % regions;
  1882     assert(0 <= index && index < regions, "sanity");
  1883     HeapRegion* r = region_at(index);
  1884     // we'll ignore "continues humongous" regions (we'll process them
  1885     // when we come across their corresponding "start humongous"
  1886     // region) and regions already claimed
  1887     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  1888       continue;
  1889     }
  1890     // OK, try to claim it
  1891     if (r->claimHeapRegion(claim_value)) {
  1892       // success!
  1893       assert(!r->continuesHumongous(), "sanity");
  1894       if (r->startsHumongous()) {
  1895         // If the region is "starts humongous" we'll iterate over its
  1896         // "continues humongous" first; in fact we'll do them
  1897         // first. The order is important. In one case, calling the
  1898         // closure on the "starts humongous" region might de-allocate
  1899         // and clear all its "continues humongous" regions and, as a
  1900         // result, we might end up processing them twice. So, we'll do
  1901         // them first (notice: most closures will ignore them anyway) and
  1902         // then we'll do the "starts humongous" region.
  1903         for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  1904           HeapRegion* chr = region_at(ch_index);
  1906           // if the region has already been claimed or it's not
  1907           // "continues humongous" we're done
  1908           if (chr->claim_value() == claim_value ||
  1909               !chr->continuesHumongous()) {
  1910             break;
  1911           }
  1913           // No one should have claimed it directly. We can assert this, given
  1914           // that we claimed its "starts humongous" region.
  1915           assert(chr->claim_value() != claim_value, "sanity");
  1916           assert(chr->humongous_start_region() == r, "sanity");
  1918           if (chr->claimHeapRegion(claim_value)) {
  1919             // we should always be able to claim it; no one else should
  1920             // be trying to claim this region
  1922             bool res2 = cl->doHeapRegion(chr);
  1923             assert(!res2, "Should not abort");
  1925             // Right now, this holds (i.e., no closure that actually
  1926             // does something with "continues humongous" regions
  1927             // clears them). We might have to weaken it in the future,
  1928             // but let's leave these two asserts here for extra safety.
  1929             assert(chr->continuesHumongous(), "should still be the case");
  1930             assert(chr->humongous_start_region() == r, "sanity");
  1931           } else {
  1932             guarantee(false, "we should not reach here");
  1933           }
  1934         }
  1935       }
  1937       assert(!r->continuesHumongous(), "sanity");
  1938       bool res = cl->doHeapRegion(r);
  1939       assert(!res, "Should not abort");
  1940     }
  1941   }
  1942 }
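// Worked example (illustrative): with n_regions() == 100 and four workers,
// worker 0 starts at region 0, worker 1 at 25, worker 2 at 50 and worker 3
// at 75. Every worker still visits all 100 indices modulo 100, but because
// claimHeapRegion() claims atomically, each region's closure runs exactly
// once regardless of how the workers' scans overlap.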
  1944 class ResetClaimValuesClosure: public HeapRegionClosure {
  1945 public:
  1946   bool doHeapRegion(HeapRegion* r) {
  1947     r->set_claim_value(HeapRegion::InitialClaimValue);
  1948     return false;
  1949   }
  1950 };
  1952 void
  1953 G1CollectedHeap::reset_heap_region_claim_values() {
  1954   ResetClaimValuesClosure blk;
  1955   heap_region_iterate(&blk);
  1956 }
  1958 #ifdef ASSERT
  1959 // This checks whether all regions in the heap have the correct claim
  1960 // value. I also piggy-backed on this a check to ensure that the
  1961 // humongous_start_region() information on "continues humongous"
  1962 // regions is correct.
  1964 class CheckClaimValuesClosure : public HeapRegionClosure {
  1965 private:
  1966   jint _claim_value;
  1967   size_t _failures;
  1968   HeapRegion* _sh_region;
  1969 public:
  1970   CheckClaimValuesClosure(jint claim_value) :
  1971     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  1972   bool doHeapRegion(HeapRegion* r) {
  1973     if (r->claim_value() != _claim_value) {
  1974       gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  1975                              "claim value = %d, should be %d",
  1976                              r->bottom(), r->end(), r->claim_value(),
  1977                              _claim_value);
  1978       ++_failures;
  1979     }
  1980     if (!r->isHumongous()) {
  1981       _sh_region = NULL;
  1982     } else if (r->startsHumongous()) {
  1983       _sh_region = r;
  1984     } else if (r->continuesHumongous()) {
  1985       if (r->humongous_start_region() != _sh_region) {
  1986         gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  1987                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  1988                                r->bottom(), r->end(),
  1989                                r->humongous_start_region(),
  1990                                _sh_region);
  1991         ++_failures;
  1992       }
  1993     }
  1994     return false;
  1995   }
  1996   size_t failures() {
  1997     return _failures;
  1998   }
  1999 };
  2001 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2002   CheckClaimValuesClosure cl(claim_value);
  2003   heap_region_iterate(&cl);
  2004   return cl.failures() == 0;
  2005 }
  2006 #endif // ASSERT
  2008 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2009   HeapRegion* r = g1_policy()->collection_set();
  2010   while (r != NULL) {
  2011     HeapRegion* next = r->next_in_collection_set();
  2012     if (cl->doHeapRegion(r)) {
  2013       cl->incomplete();
  2014       return;
  2015     }
  2016     r = next;
  2017   }
  2018 }
  2020 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2021                                                   HeapRegionClosure *cl) {
  2022   assert(r->in_collection_set(),
  2023          "Start region must be a member of the collection set.");
  2024   HeapRegion* cur = r;
  2025   while (cur != NULL) {
  2026     HeapRegion* next = cur->next_in_collection_set();
  2027     if (cl->doHeapRegion(cur) && false) {
  2028       cl->incomplete();
  2029       return;
  2030     }
  2031     cur = next;
  2032   }
  2033   cur = g1_policy()->collection_set();
  2034   while (cur != r) {
  2035     HeapRegion* next = cur->next_in_collection_set();
  2036     if (cl->doHeapRegion(cur) && false) {
  2037       cl->incomplete();
  2038       return;
  2039     }
  2040     cur = next;
  2041   }
  2042 }
  2044 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2045   return _hrs->length() > 0 ? _hrs->at(0) : NULL;
  2046 }
  2049 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2050   Space* res = heap_region_containing(addr);
  2051   if (res == NULL)
  2052     res = perm_gen()->space_containing(addr);
  2053   return res;
  2054 }
  2056 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2057   Space* sp = space_containing(addr);
  2058   if (sp != NULL) {
  2059     return sp->block_start(addr);
  2060   }
  2061   return NULL;
  2062 }
  2064 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2065   Space* sp = space_containing(addr);
  2066   assert(sp != NULL, "block_size of address outside of heap");
  2067   return sp->block_size(addr);
  2068 }
  2070 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2071   Space* sp = space_containing(addr);
  2072   return sp->block_is_obj(addr);
  2073 }
  2075 bool G1CollectedHeap::supports_tlab_allocation() const {
  2076   return true;
  2077 }
  2079 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2080   return HeapRegion::GrainBytes;
  2081 }
  2083 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2084   // Return the remaining space in the cur alloc region, but not less than
  2085   // the min TLAB size.
  2086   // Also, no more than half the region size, since we can't allow tlabs to
  2087   // grow big enough to accommodate humongous objects.
  2089   // We need to store it locally, since it might change between when we
  2090   // test for NULL and when we use it later.
  2091   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  2092   if (cur_alloc_space == NULL) {
  2093     return HeapRegion::GrainBytes/2;
  2094   } else {
  2095     return MAX2(MIN2(cur_alloc_space->free(),
  2096                      (size_t)(HeapRegion::GrainBytes/2)),
  2097                 (size_t)MinTLABSize);
  2098   }
  2099 }
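// Worked example (illustrative, assuming 1M regions): the expression above is
// MAX2(MIN2(free, 512K), MinTLABSize). A current alloc region with 300K free
// yields 300K; one with 700K free is capped at 512K, keeping TLABs out of
// humongous-object territory; and a nearly full region is rounded up to
// MinTLABSize rather than handing out a uselessly small TLAB.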
  2101 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
  2102   bool dummy;
  2103   return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
  2104 }
  2106 bool G1CollectedHeap::allocs_are_zero_filled() {
  2107   return false;
  2108 }
  2110 size_t G1CollectedHeap::large_typearray_limit() {
  2111   // FIXME
  2112   return HeapRegion::GrainBytes/HeapWordSize;
  2113 }
  2115 size_t G1CollectedHeap::max_capacity() const {
  2116   return _g1_committed.byte_size();
  2117 }
  2119 jlong G1CollectedHeap::millis_since_last_gc() {
  2120   // assert(false, "NYI");
  2121   return 0;
  2122 }
  2125 void G1CollectedHeap::prepare_for_verify() {
  2126   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2127     ensure_parsability(false);
  2128   }
  2129   g1_rem_set()->prepare_for_verify();
  2130 }
  2132 class VerifyLivenessOopClosure: public OopClosure {
  2133   G1CollectedHeap* g1h;
  2134 public:
  2135   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
  2136     g1h = _g1h;
  2137   }
  2138   void do_oop(narrowOop *p) { do_oop_work(p); }
  2139   void do_oop(      oop *p) { do_oop_work(p); }
  2141   template <class T> void do_oop_work(T *p) {
  2142     oop obj = oopDesc::load_decode_heap_oop(p);
  2143     guarantee(obj == NULL || !g1h->is_obj_dead(obj),
  2144               "Dead object referenced by a not dead object");
  2145   }
  2146 };
  2148 class VerifyObjsInRegionClosure: public ObjectClosure {
  2149 private:
  2150   G1CollectedHeap* _g1h;
  2151   size_t _live_bytes;
  2152   HeapRegion *_hr;
  2153   bool _use_prev_marking;
  2154 public:
  2155   // use_prev_marking == true  -> use "prev" marking information,
  2156   // use_prev_marking == false -> use "next" marking information
  2157   VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
  2158     : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
  2159     _g1h = G1CollectedHeap::heap();
  2160   }
  2161   void do_object(oop o) {
  2162     VerifyLivenessOopClosure isLive(_g1h);
  2163     assert(o != NULL, "Huh?");
  2164     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
  2165       o->oop_iterate(&isLive);
  2166       if (!_hr->obj_allocated_since_prev_marking(o))
  2167         _live_bytes += (o->size() * HeapWordSize);
  2168     }
  2169   }
  2170   size_t live_bytes() { return _live_bytes; }
  2171 };
  2173 class PrintObjsInRegionClosure : public ObjectClosure {
  2174   HeapRegion *_hr;
  2175   G1CollectedHeap *_g1;
  2176 public:
  2177   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  2178     _g1 = G1CollectedHeap::heap();
  2179   };
  2181   void do_object(oop o) {
  2182     if (o != NULL) {
  2183       HeapWord *start = (HeapWord *) o;
  2184       size_t word_sz = o->size();
  2185       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  2186                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  2187                           (void*) o, word_sz,
  2188                           _g1->isMarkedPrev(o),
  2189                           _g1->isMarkedNext(o),
  2190                           _hr->obj_allocated_since_prev_marking(o));
  2191       HeapWord *end = start + word_sz;
  2192       HeapWord *cur;
  2193       int *val;
  2194       for (cur = start; cur < end; cur++) {
  2195         val = (int *) cur;
  2196         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  2197       }
  2198     }
  2199   }
  2200 };
  2202 class VerifyRegionClosure: public HeapRegionClosure {
  2203 private:
  2204   bool _allow_dirty;
  2205   bool _par;
  2206   bool _use_prev_marking;
  2207 public:
  2208   // use_prev_marking == true  -> use "prev" marking information,
  2209   // use_prev_marking == false -> use "next" marking information
  2210   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
  2211     : _allow_dirty(allow_dirty),
  2212       _par(par),
  2213       _use_prev_marking(use_prev_marking) {}
  2215   bool doHeapRegion(HeapRegion* r) {
  2216     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  2217               "Should be unclaimed at verify points.");
  2218     if (!r->continuesHumongous()) {
  2219       VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
  2220       r->verify(_allow_dirty, _use_prev_marking);
  2221       r->object_iterate(&not_dead_yet_cl);
  2222       guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
  2223                 "More live objects than counted in last complete marking.");
  2224     }
  2225     return false;
  2226   }
  2227 };
  2229 class VerifyRootsClosure: public OopsInGenClosure {
  2230 private:
  2231   G1CollectedHeap* _g1h;
  2232   bool             _failures;
  2233   bool             _use_prev_marking;
  2234 public:
  2235   // use_prev_marking == true  -> use "prev" marking information,
  2236   // use_prev_marking == false -> use "next" marking information
  2237   VerifyRootsClosure(bool use_prev_marking) :
  2238     _g1h(G1CollectedHeap::heap()),
  2239     _failures(false),
  2240     _use_prev_marking(use_prev_marking) { }
  2242   bool failures() { return _failures; }
  2244   template <class T> void do_oop_nv(T* p) {
  2245     T heap_oop = oopDesc::load_heap_oop(p);
  2246     if (!oopDesc::is_null(heap_oop)) {
  2247       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2248       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
  2249         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  2250                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
  2251         obj->print_on(gclog_or_tty);
  2252         _failures = true;
  2253       }
  2254     }
  2255   }
  2257   void do_oop(oop* p)       { do_oop_nv(p); }
  2258   void do_oop(narrowOop* p) { do_oop_nv(p); }
  2259 };
  2261 // This is the task used for parallel heap verification.
  2263 class G1ParVerifyTask: public AbstractGangTask {
  2264 private:
  2265   G1CollectedHeap* _g1h;
  2266   bool _allow_dirty;
  2267   bool _use_prev_marking;
  2269 public:
  2270   // use_prev_marking == true  -> use "prev" marking information,
  2271   // use_prev_marking == false -> use "next" marking information
  2272   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
  2273                   bool use_prev_marking) :
  2274     AbstractGangTask("Parallel verify task"),
  2275     _g1h(g1h),
  2276     _allow_dirty(allow_dirty),
  2277     _use_prev_marking(use_prev_marking) { }
  2279   void work(int worker_i) {
  2280     HandleMark hm;
  2281     VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
  2282     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
  2283                                           HeapRegion::ParVerifyClaimValue);
  2284   }
  2285 };
  2287 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  2288   verify(allow_dirty, silent, /* use_prev_marking */ true);
  2289 }
  2291 void G1CollectedHeap::verify(bool allow_dirty,
  2292                              bool silent,
  2293                              bool use_prev_marking) {
  2294   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2295     if (!silent) { gclog_or_tty->print("roots "); }
  2296     VerifyRootsClosure rootsCl(use_prev_marking);
  2297     process_strong_roots(false,
  2298                          SharedHeap::SO_AllClasses,
  2299                          &rootsCl,
  2300                          &rootsCl);
  2301     rem_set()->invalidate(perm_gen()->used_region(), false);
  2302     if (!silent) { gclog_or_tty->print("heapRegions "); }
  2303     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2304       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2305              "sanity check");
  2307       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2308       int n_workers = workers()->total_workers();
  2309       set_par_threads(n_workers);
  2310       workers()->run_task(&task);
  2311       set_par_threads(0);
  2313       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  2314              "sanity check");
  2316       reset_heap_region_claim_values();
  2318       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2319              "sanity check");
  2320     } else {
  2321       VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
  2322       _hrs->iterate(&blk);
  2323     }
  2324     if (!silent) gclog_or_tty->print("remset ");
  2325     rem_set()->verify();
  2326     guarantee(!rootsCl.failures(), "should not have had failures");
  2327   } else {
  2328     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  2329   }
  2330 }
  2332 class PrintRegionClosure: public HeapRegionClosure {
  2333   outputStream* _st;
  2334 public:
  2335   PrintRegionClosure(outputStream* st) : _st(st) {}
  2336   bool doHeapRegion(HeapRegion* r) {
  2337     r->print_on(_st);
  2338     return false;
  2339   }
  2340 };
  2342 void G1CollectedHeap::print() const { print_on(tty); }
  2344 void G1CollectedHeap::print_on(outputStream* st) const {
  2345   print_on(st, PrintHeapAtGCExtended);
  2346 }
  2348 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
  2349   st->print(" %-20s", "garbage-first heap");
  2350   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  2351             capacity()/K, used_unlocked()/K);
  2352   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  2353             _g1_storage.low_boundary(),
  2354             _g1_storage.high(),
  2355             _g1_storage.high_boundary());
  2356   st->cr();
  2357   st->print("  region size " SIZE_FORMAT "K, ",
  2358             HeapRegion::GrainBytes/K);
  2359   size_t young_regions = _young_list->length();
  2360   st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  2361             young_regions, young_regions * HeapRegion::GrainBytes / K);
  2362   size_t survivor_regions = g1_policy()->recorded_survivor_regions();
  2363   st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
  2364             survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
  2365   st->cr();
  2366   perm()->as_gen()->print_on(st);
  2367   if (extended) {
  2368     print_on_extended(st);
  2369   }
  2370 }
  2372 void G1CollectedHeap::print_on_extended(outputStream* st) const {
  2373   PrintRegionClosure blk(st);
  2374   _hrs->iterate(&blk);
  2375 }
  2377 class PrintOnThreadsClosure : public ThreadClosure {
  2378   outputStream* _st;
  2379 public:
  2380   PrintOnThreadsClosure(outputStream* st) : _st(st) { }
  2381   virtual void do_thread(Thread *t) {
  2382     t->print_on(_st);
  2383   }
  2384 };
  2386 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  2387   if (ParallelGCThreads > 0) {
  2388     workers()->print_worker_threads();
  2389   }
  2390   st->print("\"G1 concurrent mark GC Thread\" ");
  2391   _cmThread->print();
  2392   st->cr();
  2393   st->print("\"G1 concurrent refinement GC Threads\" ");
  2394   PrintOnThreadsClosure p(st);
  2395   _cg1r->threads_do(&p);
  2396   st->cr();
  2397   st->print("\"G1 zero-fill GC Thread\" ");
  2398   _czft->print_on(st);
  2399   st->cr();
  2400 }
  2402 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  2403   if (ParallelGCThreads > 0) {
  2404     workers()->threads_do(tc);
  2405   }
  2406   tc->do_thread(_cmThread);
  2407   _cg1r->threads_do(tc);
  2408   tc->do_thread(_czft);
  2409 }
  2411 void G1CollectedHeap::print_tracing_info() const {
  2412   // We'll overload this to mean "trace GC pause statistics."
  2413   if (TraceGen0Time || TraceGen1Time) {
  2414     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  2415     // to that.
  2416     g1_policy()->print_tracing_info();
  2417   }
  2418   if (G1SummarizeRSetStats) {
  2419     g1_rem_set()->print_summary_info();
  2420   }
  2421   if (G1SummarizeConcurrentMark) {
  2422     concurrent_mark()->print_summary_info();
  2423   }
  2424   if (G1SummarizeZFStats) {
  2425     ConcurrentZFThread::print_summary_info();
  2426   }
  2427   g1_policy()->print_yg_surv_rate_info();
  2429   SpecializationStats::print();
  2430 }
  2433 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  2434   HeapRegion* hr = heap_region_containing(addr);
  2435   if (hr == NULL) {
  2436     return 0;
  2437   } else {
  2438     return 1;
  2439   }
  2440 }
  2442 G1CollectedHeap* G1CollectedHeap::heap() {
  2443   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  2444          "not a garbage-first heap");
  2445   return _g1h;
  2446 }
  2448 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  2449   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  2450   // Call allocation profiler
  2451   AllocationProfiler::iterate_since_last_gc();
  2452   // Fill TLAB's and such
  2453   ensure_parsability(true);
  2454 }
  2456 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  2457   // FIXME: what is this about?
  2458   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  2459   // is set.
  2460   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  2461                         "derived pointer present"));
  2462 }
  2464 void G1CollectedHeap::do_collection_pause() {
  2465   // Read the GC count while holding the Heap_lock
  2466   // we need to do this _before_ wait_for_cleanup_complete(), to
  2467   // ensure that we do not give up the heap lock and potentially
  2468   // pick up the wrong count
  2469   int gc_count_before = SharedHeap::heap()->total_collections();
  2471   // Don't want to do a GC pause while cleanup is being completed!
  2472   wait_for_cleanup_complete();
  2474   g1_policy()->record_stop_world_start();
  2475   {
  2476     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
  2477     VM_G1IncCollectionPause op(gc_count_before);
  2478     VMThread::execute(&op);
  2479   }
  2480 }
  2482 void
  2483 G1CollectedHeap::doConcurrentMark() {
  2484   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2485   if (!_cmThread->in_progress()) {
  2486     _cmThread->set_started();
  2487     CGC_lock->notify();
  2488   }
  2489 }
  2491 class VerifyMarkedObjsClosure: public ObjectClosure {
  2492     G1CollectedHeap* _g1h;
  2493     public:
  2494     VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  2495     void do_object(oop obj) {
  2496       assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
  2497              "markandsweep mark should agree with concurrent deadness");
  2498     }
  2499 };
  2501 void
  2502 G1CollectedHeap::checkConcurrentMark() {
  2503     VerifyMarkedObjsClosure verifycl(this);
  2504     //    MutexLockerEx x(getMarkBitMapLock(),
  2505     //              Mutex::_no_safepoint_check_flag);
  2506     object_iterate(&verifycl, false);
  2507 }
  2509 void G1CollectedHeap::do_sync_mark() {
  2510   _cm->checkpointRootsInitial();
  2511   _cm->markFromRoots();
  2512   _cm->checkpointRootsFinal(false);
  2513 }
  2515 // <NEW PREDICTION>
  2517 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
  2518                                                        bool young) {
  2519   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
  2520 }
  2522 void G1CollectedHeap::check_if_region_is_too_expensive(double
  2523                                                            predicted_time_ms) {
  2524   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
  2525 }
  2527 size_t G1CollectedHeap::pending_card_num() {
  2528   size_t extra_cards = 0;
  2529   JavaThread *curr = Threads::first();
  2530   while (curr != NULL) {
  2531     DirtyCardQueue& dcq = curr->dirty_card_queue();
  2532     extra_cards += dcq.size();
  2533     curr = curr->next();
  2534   }
  2535   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2536   size_t buffer_size = dcqs.buffer_size();
  2537   size_t buffer_num = dcqs.completed_buffers_num();
  2538   return buffer_size * buffer_num + extra_cards;
  2539 }
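// Illustrative arithmetic: with a 256-entry buffer_size, ten completed
// buffers, and per-thread queues currently holding 57 entries in total,
// pending_card_num() reports 256 * 10 + 57 == 2617 pending cards.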
  2541 size_t G1CollectedHeap::max_pending_card_num() {
  2542   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2543   size_t buffer_size = dcqs.buffer_size();
  2544   size_t buffer_num  = dcqs.completed_buffers_num();
  2545   int thread_num  = Threads::number_of_threads();
  2546   return (buffer_num + thread_num) * buffer_size;
  2547 }
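// This is an upper bound: besides the completed buffers, each of the
// thread_num mutator threads can hold at most one partially filled buffer,
// so (buffer_num + thread_num) * buffer_size can never be exceeded.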
  2549 size_t G1CollectedHeap::cards_scanned() {
  2550   HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
  2551   return g1_rset->cardsScanned();
  2552 }
  2554 void
  2555 G1CollectedHeap::setup_surviving_young_words() {
  2556   guarantee( _surviving_young_words == NULL, "pre-condition" );
  2557   size_t array_length = g1_policy()->young_cset_length();
  2558   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  2559   if (_surviving_young_words == NULL) {
  2560     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  2561                           "Not enough space for young surv words summary.");
  2562   }
  2563   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  2564 #ifdef ASSERT
  2565   for (size_t i = 0;  i < array_length; ++i) {
  2566     assert( _surviving_young_words[i] == 0, "memset above" );
  2567   }
  2568 #endif // ASSERT
  2569 }
  2571 void
  2572 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  2573   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  2574   size_t array_length = g1_policy()->young_cset_length();
  2575   for (size_t i = 0; i < array_length; ++i)
  2576     _surviving_young_words[i] += surv_young_words[i];
  2577 }
  2579 void
  2580 G1CollectedHeap::cleanup_surviving_young_words() {
  2581   guarantee( _surviving_young_words != NULL, "pre-condition" );
  2582   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
  2583   _surviving_young_words = NULL;
  2584 }
  2586 // </NEW PREDICTION>
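// How the three helpers above cooperate during a pause (the call names are
// real, the driver shape is an illustrative sketch):
//
//   setup_surviving_young_words();                   // once, at pause start
//   // ... each worker accumulates survivor sizes into a private array ...
//   update_surviving_young_words(local_surv_words);  // merged under a lock
//   cleanup_surviving_young_words();                 // once, at pause end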
  2588 void
  2589 G1CollectedHeap::do_collection_pause_at_safepoint() {
  2590   if (PrintHeapAtGC) {
  2591     Universe::print_heap_before_gc();
  2592   }
  2594   {
  2595     char verbose_str[128];
  2596     sprintf(verbose_str, "GC pause ");
  2597     if (g1_policy()->in_young_gc_mode()) {
  2598       if (g1_policy()->full_young_gcs())
  2599         strcat(verbose_str, "(young)");
  2600       else
  2601         strcat(verbose_str, "(partial)");
  2602     }
  2603     if (g1_policy()->should_initiate_conc_mark())
  2604       strcat(verbose_str, " (initial-mark)");
  2606     GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
  2608     // if PrintGCDetails is on, we'll print long statistics information
  2609     // in the collector policy code, so let's not print this as the output
  2610     // is messy if we do.
  2611     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  2612     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  2613     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  2615     ResourceMark rm;
  2616     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  2617     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
  2618     guarantee(!is_gc_active(), "collection is not reentrant");
  2619     assert(regions_accounted_for(), "Region leakage!");
  2621     increment_gc_time_stamp();
  2623     if (g1_policy()->in_young_gc_mode()) {
  2624       assert(check_young_list_well_formed(),
  2625              "young list should be well formed");
  2626     }
  2628     if (GC_locker::is_active()) {
  2629       return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  2630     }
  2632     bool abandoned = false;
  2633     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  2634       IsGCActiveMark x;
  2636       gc_prologue(false);
  2637       increment_total_collections(false /* full gc */);
  2639 #if G1_REM_SET_LOGGING
  2640       gclog_or_tty->print_cr("\nJust chose CS, heap:");
  2641       print();
  2642 #endif
  2644       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  2645         HandleMark hm;  // Discard invalid handles created during verification
  2646         prepare_for_verify();
  2647         gclog_or_tty->print(" VerifyBeforeGC:");
  2648         Universe::verify(false);
  2649       }
  2651       COMPILER2_PRESENT(DerivedPointerTable::clear());
  2653       // We want to turn off ref discovery, if necessary, and turn it back
  2654       // on again later if we do. XXX Dubious: why is discovery disabled?
  2655       bool was_enabled = ref_processor()->discovery_enabled();
  2656       if (was_enabled) ref_processor()->disable_discovery();
  2658       // Forget the current alloc region (we might even choose it to be part
  2659       // of the collection set!).
  2660       abandon_cur_alloc_region();
  2662       // The start time recorded below deliberately excludes the time spent
  2663       // in the possible verification above.
  2664       double start_time_sec = os::elapsedTime();
  2665       size_t start_used_bytes = used();
  2667       g1_policy()->record_collection_pause_start(start_time_sec,
  2668                                                  start_used_bytes);
  2670       guarantee(_in_cset_fast_test == NULL, "invariant");
  2671       guarantee(_in_cset_fast_test_base == NULL, "invariant");
  2672       _in_cset_fast_test_length = max_regions();
  2673       _in_cset_fast_test_base =
  2674                              NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  2675       memset(_in_cset_fast_test_base, false,
  2676                                      _in_cset_fast_test_length * sizeof(bool));
  2677       // We're biasing _in_cset_fast_test to avoid subtracting the
  2678       // beginning of the heap every time we want to index; basically
  2679       // it's the same as what we do with the card table.
  2680       _in_cset_fast_test = _in_cset_fast_test_base -
  2681               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
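      // Illustrative sketch of the biased indexing (the accessor shape is an
      // assumption, not code from this file):
      //
      //   bool in_cset =
      //     _in_cset_fast_test[(size_t)addr >> HeapRegion::LogOfHRGrainBytes];
      //
      // i.e. membership is tested straight off a heap address with no base
      // subtraction, exactly like a card table lookup.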
  2683 #if SCAN_ONLY_VERBOSE
  2684       _young_list->print();
  2685 #endif // SCAN_ONLY_VERBOSE
  2687       if (g1_policy()->should_initiate_conc_mark()) {
  2688         concurrent_mark()->checkpointRootsInitialPre();
  2689       }
  2690       save_marks();
  2692       // We must do this before any possible evacuation that should propagate
  2693       // marks.
  2694       if (mark_in_progress()) {
  2695         double start_time_sec = os::elapsedTime();
  2697         _cm->drainAllSATBBuffers();
  2698         double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  2699         g1_policy()->record_satb_drain_time(finish_mark_ms);
  2700       }
  2701       // Record the number of elements currently on the mark stack, so we
  2702       // only iterate over these.  (Since evacuation may add to the mark
  2703       // stack, doing more exposes race conditions.)  If no mark is in
  2704       // progress, this will be zero.
  2705       _cm->set_oops_do_bound();
  2707       assert(regions_accounted_for(), "Region leakage.");
  2709       if (mark_in_progress())
  2710         concurrent_mark()->newCSet();
  2712       // Now choose the CS.
  2713       g1_policy()->choose_collection_set();
  2715       // We may abandon a pause if we find no region that will fit in the MMU
  2716       // pause.
  2717       abandoned = (g1_policy()->collection_set() == NULL);
  2719       // Nothing to do if we were unable to choose a collection set.
  2720       if (!abandoned) {
  2721 #if G1_REM_SET_LOGGING
  2722         gclog_or_tty->print_cr("\nAfter pause, heap:");
  2723         print();
  2724 #endif
  2726         setup_surviving_young_words();
  2728         // Set up the gc allocation regions.
  2729         get_gc_alloc_regions();
  2731         // Actually do the work...
  2732         evacuate_collection_set();
  2733         free_collection_set(g1_policy()->collection_set());
  2734         g1_policy()->clear_collection_set();
  2736         FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
  2737         // this is more for peace of mind; we're nulling them here and
  2738         // we're expecting them to be null at the beginning of the next GC
  2739         _in_cset_fast_test = NULL;
  2740         _in_cset_fast_test_base = NULL;
  2742         cleanup_surviving_young_words();
  2744         if (g1_policy()->in_young_gc_mode()) {
  2745           _young_list->reset_sampled_info();
  2746           assert(check_young_list_empty(true),
  2747                  "young list should be empty");
  2749 #if SCAN_ONLY_VERBOSE
  2750           _young_list->print();
  2751 #endif // SCAN_ONLY_VERBOSE
  2753           g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  2754                                           _young_list->first_survivor_region(),
  2755                                           _young_list->last_survivor_region());
  2756           _young_list->reset_auxilary_lists();
  2757         }
  2758       } else {
  2759         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  2760       }
  2762       if (evacuation_failed()) {
  2763         _summary_bytes_used = recalculate_used();
  2764       } else {
  2765         // The "used" of the collection set regions has already been subtracted
  2766         // when they were freed.  Add in the bytes evacuated.
  2767         _summary_bytes_used += g1_policy()->bytes_in_to_space();
  2768       }
  2770       if (g1_policy()->in_young_gc_mode() &&
  2771           g1_policy()->should_initiate_conc_mark()) {
  2772         concurrent_mark()->checkpointRootsInitialPost();
  2773         set_marking_started();
  2774         // CAUTION: after the doConcurrentMark() call below,
  2775         // the concurrent marking thread(s) could be running
  2776         // concurrently with us. Make sure that anything after
  2777         // this point does not assume that we are the only GC thread
  2778         // running. Note: of course, the actual marking work will
  2779         // not start until the safepoint itself is released in
  2780         // ConcurrentGCThread::safepoint_desynchronize().
  2781         doConcurrentMark();
  2782       }
  2784 #if SCAN_ONLY_VERBOSE
  2785       _young_list->print();
  2786 #endif // SCAN_ONLY_VERBOSE
  2788       double end_time_sec = os::elapsedTime();
  2789       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  2790       g1_policy()->record_pause_time_ms(pause_time_ms);
  2791       g1_policy()->record_collection_pause_end(abandoned);
  2793       assert(regions_accounted_for(), "Region leakage.");
  2795       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  2796         HandleMark hm;  // Discard invalid handles created during verification
  2797         gclog_or_tty->print(" VerifyAfterGC:");
  2798         prepare_for_verify();
  2799         Universe::verify(false);
  2800       }
  2802       if (was_enabled) ref_processor()->enable_discovery();
  2804       {
  2805         size_t expand_bytes = g1_policy()->expansion_amount();
  2806         if (expand_bytes > 0) {
  2807           size_t bytes_before = capacity();
  2808           expand(expand_bytes);
  2809         }
  2810       }
  2812       if (mark_in_progress()) {
  2813         concurrent_mark()->update_g1_committed();
  2814       }
  2816 #ifdef TRACESPINNING
  2817       ParallelTaskTerminator::print_termination_counts();
  2818 #endif
  2820       gc_epilogue(false);
  2821     }
  2823     assert(verify_region_lists(), "Bad region lists.");
  2825     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  2826       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  2827       print_tracing_info();
  2828       vm_exit(-1);
  2829     }
  2830   }
  2832   if (PrintHeapAtGC) {
  2833     Universe::print_heap_after_gc();
  2834   }
  2835   if (G1SummarizeRSetStats &&
  2836       (G1SummarizeRSetStatsPeriod > 0) &&
  2837       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  2838     g1_rem_set()->print_summary_info();
  2839   }
  2840 }
  2842 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  2843   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  2844   // make sure we don't call set_gc_alloc_region() multiple times on
  2845   // the same region
  2846   assert(r == NULL || !r->is_gc_alloc_region(),
  2847          "shouldn't already be a GC alloc region");
  2848   HeapWord* original_top = NULL;
  2849   if (r != NULL)
  2850     original_top = r->top();
  2852   // We will want to record the used space in r as being there before gc.
  2853   // Once we install it as a GC alloc region, it's eligible for allocation.
  2854   // So record it now and use it later.
  2855   size_t r_used = 0;
  2856   if (r != NULL) {
  2857     r_used = r->used();
  2859     if (ParallelGCThreads > 0) {
  2860       // need to take the lock to guard against two threads calling
  2861       // get_gc_alloc_region concurrently (very unlikely but...)
  2862       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  2863       r->save_marks();
  2864     }
  2865   }
  2866   HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  2867   _gc_alloc_regions[purpose] = r;
  2868   if (old_alloc_region != NULL) {
  2869     // Replace aliases too.
  2870     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2871       if (_gc_alloc_regions[ap] == old_alloc_region) {
  2872         _gc_alloc_regions[ap] = r;
  2873       }
  2874     }
  2875   }
  2876   if (r != NULL) {
  2877     push_gc_alloc_region(r);
  2878     if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
  2879       // We are using a region as a GC alloc region after it has been used
  2880       // as a mutator allocation region during the current marking cycle.
  2881       // The mutator-allocated objects are currently implicitly marked, but
  2882       // when we move hr->next_top_at_mark_start() forward at the end
  2883       // of the GC pause, they won't be.  We therefore mark all objects in
  2884       // the "gap".  We do this object-by-object, since marking densely
  2885       // does not currently work right with marking bitmap iteration.  This
  2886       // means we rely on TLAB filling at the start of pauses, and no
  2887       // "resuscitation" of filled TLAB's.  If we want to do this, we need
  2888       // to fix the marking bitmap iteration.
  2889       HeapWord* curhw = r->next_top_at_mark_start();
  2890       HeapWord* t = original_top;
  2892       while (curhw < t) {
  2893         oop cur = (oop)curhw;
  2894         // We'll assume parallel for generality.  This is rare code.
  2895         concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
  2896         curhw = curhw + cur->size();
  2898       assert(curhw == t, "Should have parsed correctly.");
  2899     }
  2900     if (G1PolicyVerbose > 1) {
  2901       gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
  2902                           "for survivors:", r->bottom(), original_top, r->end());
  2903       r->print();
  2905     g1_policy()->record_before_bytes(r_used);
  2909 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  2910   assert(Thread::current()->is_VM_thread() ||
  2911          par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
  2912   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  2913          "Precondition.");
  2914   hr->set_is_gc_alloc_region(true);
  2915   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  2916   _gc_alloc_region_list = hr;
  2919 #ifdef G1_DEBUG
  2920 class FindGCAllocRegion: public HeapRegionClosure {
  2921 public:
  2922   bool doHeapRegion(HeapRegion* r) {
  2923     if (r->is_gc_alloc_region()) {
  2924       gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
  2925                              r->hrs_index(), r->bottom());
  2927     return false;
  2929 };
  2930 #endif // G1_DEBUG
  2932 void G1CollectedHeap::forget_alloc_region_list() {
  2933   assert(Thread::current()->is_VM_thread(), "Precondition");
  2934   while (_gc_alloc_region_list != NULL) {
  2935     HeapRegion* r = _gc_alloc_region_list;
  2936     assert(r->is_gc_alloc_region(), "Invariant.");
  2937     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  2938     // newly allocated data in order to be able to apply deferred updates
  2939     // before the GC is done, for verification purposes (i.e. to allow
  2940     // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
  2941     // collection.
  2942     r->ContiguousSpace::set_saved_mark();
  2943     _gc_alloc_region_list = r->next_gc_alloc_region();
  2944     r->set_next_gc_alloc_region(NULL);
  2945     r->set_is_gc_alloc_region(false);
  2946     if (r->is_survivor()) {
  2947       if (r->is_empty()) {
  2948         r->set_not_young();
  2949       } else {
  2950         _young_list->add_survivor_region(r);
  2953     if (r->is_empty()) {
  2954       ++_free_regions;
  2957 #ifdef G1_DEBUG
  2958   FindGCAllocRegion fa;
  2959   heap_region_iterate(&fa);
  2960 #endif // G1_DEBUG
  2964 bool G1CollectedHeap::check_gc_alloc_regions() {
  2965   // TODO: allocation regions check
  2966   return true;
  2969 void G1CollectedHeap::get_gc_alloc_regions() {
  2970   // First, let's check that the GC alloc region list is empty (it should be).
  2971   assert(_gc_alloc_region_list == NULL, "invariant");
  2973   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2974     assert(_gc_alloc_regions[ap] == NULL, "invariant");
  2975     assert(_gc_alloc_region_counts[ap] == 0, "invariant");
  2977     // Create new GC alloc regions.
  2978     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
  2979     _retained_gc_alloc_regions[ap] = NULL;
  2981     if (alloc_region != NULL) {
  2982       assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
  2984       // let's make sure that the GC alloc region is not tagged as such
  2985       // outside a GC operation
  2986       assert(!alloc_region->is_gc_alloc_region(), "sanity");
  2988       if (alloc_region->in_collection_set() ||
  2989           alloc_region->top() == alloc_region->end() ||
  2990           alloc_region->top() == alloc_region->bottom()) {
  2991         // we will discard the current GC alloc region if it's in the
  2992         // collection set (it can happen!), if it's already full (no
  2993         // point in using it), or if it's empty (this means that it
  2994         // was emptied during a cleanup and it should be on the free
  2995         // list now).
  2997         alloc_region = NULL;
  3001     if (alloc_region == NULL) {
  3002       // we will get a new GC alloc region
  3003       alloc_region = newAllocRegionWithExpansion(ap, 0);
  3004     } else {
  3005       // the region was retained from the last collection
  3006       ++_gc_alloc_region_counts[ap];
  3009     if (alloc_region != NULL) {
  3010       assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
  3011       set_gc_alloc_region(ap, alloc_region);
  3014     assert(_gc_alloc_regions[ap] == NULL ||
  3015            _gc_alloc_regions[ap]->is_gc_alloc_region(),
  3016            "the GC alloc region should be tagged as such");
  3017     assert(_gc_alloc_regions[ap] == NULL ||
  3018            _gc_alloc_regions[ap] == _gc_alloc_region_list,
  3019            "the GC alloc region should be the same as the GC alloc list head");
  3021   // Set alternative regions for allocation purposes that have reached
  3022   // their limit.
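         // For example (assuming the two purposes are "survivor" and "tenured",
         // with alternative_purpose(survivor) == tenured): if no survivor region
         // could be allocated above, the survivor slot is aliased to the tenured
         // region, and survivor-bound allocations fall back to it.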
  3023   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3024     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
  3025     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
  3026       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
  3029   assert(check_gc_alloc_regions(), "alloc regions messed up");
  3032 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  3033   // We keep a separate list of all regions that have been alloc regions in
  3034   // the current collection pause. Forget that now. This method will
  3035   // untag the GC alloc regions and tear down the GC alloc region
  3036   // list. It's desirable that no regions are tagged as GC alloc
  3037   // outside GCs.
  3038   forget_alloc_region_list();
  3040   // The current alloc regions contain objs that have survived
  3041   // collection. Make them no longer GC alloc regions.
  3042   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3043     HeapRegion* r = _gc_alloc_regions[ap];
  3044     _retained_gc_alloc_regions[ap] = NULL;
  3045     _gc_alloc_region_counts[ap] = 0;
  3047     if (r != NULL) {
  3048       // we retain nothing on _gc_alloc_regions between GCs
  3049       set_gc_alloc_region(ap, NULL);
  3051       if (r->is_empty()) {
  3052         // we didn't actually allocate anything in it; let's just put
  3053         // it on the free list
  3054         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  3055         r->set_zero_fill_complete();
  3056         put_free_region_on_list_locked(r);
  3057       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3058         // retain it so that we can use it at the beginning of the next GC
  3059         _retained_gc_alloc_regions[ap] = r;
  3065 #ifndef PRODUCT
  3066 // Useful for debugging
  3068 void G1CollectedHeap::print_gc_alloc_regions() {
  3069   gclog_or_tty->print_cr("GC alloc regions");
  3070   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3071     HeapRegion* r = _gc_alloc_regions[ap];
  3072     if (r == NULL) {
  3073       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
  3074     } else {
  3075       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
  3076                              ap, r->bottom(), r->used());
  3080 #endif // PRODUCT
  3082 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  3083   _drain_in_progress = false;
  3084   set_evac_failure_closure(cl);
  3085   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3088 void G1CollectedHeap::finalize_for_evac_failure() {
  3089   assert(_evac_failure_scan_stack != NULL &&
  3090          _evac_failure_scan_stack->length() == 0,
  3091          "Postcondition");
  3092   assert(!_drain_in_progress, "Postcondition");
  3093   // The scan stack was allocated on the C heap in init_for_evac_failure(),
  3094   // so delete it here to avoid leaking one instance per evacuation pause.
  3095   delete _evac_failure_scan_stack;
  3096   _evac_failure_scan_stack = NULL;
  3099 // *** Sequential G1 Evacuation
  3101 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
  3102   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  3103   // let the caller handle alloc failure
  3104   if (alloc_region == NULL) return NULL;
  3105   assert(isHumongous(word_size) || !alloc_region->isHumongous(),
  3106          "Either the object is humongous or the region isn't");
  3107   HeapWord* block = alloc_region->allocate(word_size);
  3108   if (block == NULL) {
  3109     block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
  3111   return block;
  3114 class G1IsAliveClosure: public BoolObjectClosure {
  3115   G1CollectedHeap* _g1;
  3116 public:
  3117   G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3118   void do_object(oop p) { assert(false, "Do not call."); }
  3119   bool do_object_b(oop p) {
  3120     // It is reachable if it is outside the collection set, or is inside
  3121     // and forwarded.
  3123 #ifdef G1_DEBUG
  3124     gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
  3125                            (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
  3126                            !_g1->obj_in_cs(p) || p->is_forwarded());
  3127 #endif // G1_DEBUG
  3129     return !_g1->obj_in_cs(p) || p->is_forwarded();
  3131 };
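       // Note that an object in the collection set that failed to evacuate is
       // forwarded to itself (see handle_evacuation_failure() below), so the
       // predicate above still reports it as alive.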
  3133 class G1KeepAliveClosure: public OopClosure {
  3134   G1CollectedHeap* _g1;
  3135 public:
  3136   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3137   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  3138   void do_oop(      oop* p) {
  3139     oop obj = *p;
  3140 #ifdef G1_DEBUG
  3141     if (PrintGC && Verbose) {
  3142       gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
  3143                              p, (void*) obj, (void*) *p);
  3145 #endif // G1_DEBUG
  3147     if (_g1->obj_in_cs(obj)) {
  3148       assert( obj->is_forwarded(), "invariant" );
  3149       *p = obj->forwardee();
  3150 #ifdef G1_DEBUG
  3151       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
  3152                              (void*) obj, (void*) *p);
  3153 #endif // G1_DEBUG
  3156 };
  3158 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
  3159 private:
  3160   G1CollectedHeap* _g1;
  3161   G1RemSet* _g1_rem_set;
  3162 public:
  3163   UpdateRSetImmediate(G1CollectedHeap* g1) :
  3164     _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
  3166   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3167   virtual void do_oop(      oop* p) { do_oop_work(p); }
  3168   template <class T> void do_oop_work(T* p) {
  3169     assert(_from->is_in_reserved(p), "paranoia");
  3170     T heap_oop = oopDesc::load_heap_oop(p);
  3171     if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
  3172       _g1_rem_set->par_write_ref(_from, p, 0);
  3175 };
  3177 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  3178 private:
  3179   G1CollectedHeap* _g1;
  3180   DirtyCardQueue *_dcq;
  3181   CardTableModRefBS* _ct_bs;
  3183 public:
  3184   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
  3185     _g1(g1), _dcq(dcq), _ct_bs((CardTableModRefBS*)_g1->barrier_set()) {}
  3187   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3188   virtual void do_oop(      oop* p) { do_oop_work(p); }
  3189   template <class T> void do_oop_work(T* p) {
  3190     assert(_from->is_in_reserved(p), "paranoia");
  3191     if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
  3192         !_from->is_survivor()) {
  3193       size_t card_index = _ct_bs->index_for(p);
  3194       if (_ct_bs->mark_card_deferred(card_index)) {
  3195         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
  3199 };
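       // How the deferred variant above is consumed (a sketch; see
       // remove_self_forwarding_pointers() and evacuate_collection_set()):
       //
       //   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
       //   UpdateRSetDeferred deferred_update(_g1h, &dcq);
       //   // ... passed to RemoveSelfPointerClosure, which applies it to
       //   // every object that failed to evacuate ...
       //
       // The cards enqueued on dcq stay on the heap's private dirty card queue
       // set for the duration of the pause; under G1DeferredRSUpdate they are
       // then redirtied and merged into the JavaThread queue set, so the
       // skipped remembered set entries are recreated by the normal card
       // refinement machinery.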
  3203 class RemoveSelfPointerClosure: public ObjectClosure {
  3204 private:
  3205   G1CollectedHeap* _g1;
  3206   ConcurrentMark* _cm;
  3207   HeapRegion* _hr;
  3208   size_t _prev_marked_bytes;
  3209   size_t _next_marked_bytes;
  3210   OopsInHeapRegionClosure *_cl;
  3211 public:
  3212   RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
  3213     _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
  3214     _next_marked_bytes(0), _cl(cl) {}
  3216   size_t prev_marked_bytes() { return _prev_marked_bytes; }
  3217   size_t next_marked_bytes() { return _next_marked_bytes; }
  3219   // The original idea here was to coalesce evacuated and dead objects.
  3220   // However that caused complications with the block offset table (BOT).
  3221   // In particular, consider a case with two TLABs, one of them partially refined:
  3222   // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  3223   // The BOT entries of the unrefined part of TLAB_2 point to the start
  3224   // of TLAB_2. If the last object of the TLAB_1 and the first object
  3225   // of TLAB_2 are coalesced, then the cards of the unrefined part
  3226   // would point into the middle of the filler object.
  3227   //
  3228   // The current approach is to not coalesce and leave the BOT contents intact.
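         // Concretely: each evacuated or dead object is overwritten in place by
         // a filler object of exactly the same size (see do_object() below), so
         // every address a BOT entry refers to remains the start of an object.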
  3229   void do_object(oop obj) {
  3230     if (obj->is_forwarded() && obj->forwardee() == obj) {
  3231       // The object failed to move.
  3232       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
  3233       _cm->markPrev(obj);
  3234       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3235       _prev_marked_bytes += (obj->size() * HeapWordSize);
  3236       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
  3237         _cm->markAndGrayObjectIfNecessary(obj);
  3239       obj->set_mark(markOopDesc::prototype());
  3240       // While we were processing RSet buffers during the
  3241       // collection, we actually didn't scan any cards on the
  3242       // collection set, since we didn't want to update remembered
  3243       // sets with entries that point into the collection set, given
  3244       // that live objects from the collection set are about to move
  3245       // and such entries will be stale very soon. This change also
  3246       // dealt with a reliability issue which involved scanning a
  3247       // card in the collection set and coming across an array that
  3248       // was being chunked and looking malformed. The problem is
  3249       // that, if evacuation fails, we might have remembered set
  3250       // entries missing given that we skipped cards on the
  3251       // collection set. So, we'll recreate such entries now.
  3252       obj->oop_iterate(_cl);
  3253       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3254     } else {
  3255       // The object has been either evacuated or is dead. Fill it with a
  3256       // dummy object.
  3257       MemRegion mr((HeapWord*)obj, obj->size());
  3258       CollectedHeap::fill_with_object(mr);
  3259       _cm->clearRangeBothMaps(mr);
  3262 };
  3264 void G1CollectedHeap::remove_self_forwarding_pointers() {
  3265   UpdateRSetImmediate immediate_update(_g1h);
  3266   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  3267   UpdateRSetDeferred deferred_update(_g1h, &dcq);
  3268   OopsInHeapRegionClosure *cl;
  3269   if (G1DeferredRSUpdate) {
  3270     cl = &deferred_update;
  3271   } else {
  3272     cl = &immediate_update;
  3274   HeapRegion* cur = g1_policy()->collection_set();
  3275   while (cur != NULL) {
  3276     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3278     RemoveSelfPointerClosure rspc(_g1h, cl);
  3279     if (cur->evacuation_failed()) {
  3280       assert(cur->in_collection_set(), "bad CS");
  3281       cl->set_region(cur);
  3282       cur->object_iterate(&rspc);
  3284       // A number of manipulations to make the TAMS be the current top,
  3285       // and the marked bytes be the ones observed in the iteration.
  3286       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
  3287         // The comments below are the postconditions achieved by the
  3288         // calls.  Note especially the last such condition, which says that
  3289         // the count of marked bytes has been properly restored.
  3290         cur->note_start_of_marking(false);
  3291         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3292         cur->add_to_marked_bytes(rspc.prev_marked_bytes());
  3293         // _next_marked_bytes == prev_marked_bytes.
  3294         cur->note_end_of_marking();
  3295         // _prev_top_at_mark_start == top(),
  3296         // _prev_marked_bytes == prev_marked_bytes
  3298       // If there is no mark in progress, we modified the _next variables
  3299       // above needlessly, but harmlessly.
  3300       if (_g1h->mark_in_progress()) {
  3301         cur->note_start_of_marking(false);
  3302         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3303         // _next_marked_bytes == next_marked_bytes.
  3306       // Now make sure the region has the right index in the sorted array.
  3307       g1_policy()->note_change_in_marked_bytes(cur);
  3309     cur = cur->next_in_collection_set();
  3311   assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3313   // Now restore saved marks, if any.
  3314   if (_objs_with_preserved_marks != NULL) {
  3315     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  3318     guarantee(_objs_with_preserved_marks->length() ==
  3319               _preserved_marks_of_objs->length(), "Both or none.");
  3320     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  3321       oop obj   = _objs_with_preserved_marks->at(i);
  3322       markOop m = _preserved_marks_of_objs->at(i);
  3323       obj->set_mark(m);
  3325     // Delete the preserved marks growable arrays (allocated on the C heap).
  3326     delete _objs_with_preserved_marks;
  3327     delete _preserved_marks_of_objs;
  3328     _objs_with_preserved_marks = NULL;
  3329     _preserved_marks_of_objs = NULL;
  3333 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  3334   _evac_failure_scan_stack->push(obj);
  3337 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  3338   assert(_evac_failure_scan_stack != NULL, "precondition");
  3340   while (_evac_failure_scan_stack->length() > 0) {
  3341      oop obj = _evac_failure_scan_stack->pop();
  3342      _evac_failure_closure->set_region(heap_region_containing(obj));
  3343      obj->oop_iterate_backwards(_evac_failure_closure);
  3347 void G1CollectedHeap::handle_evacuation_failure(oop old) {
  3348   markOop m = old->mark();
  3349   // forward to self
  3350   assert(!old->is_forwarded(), "precondition");
  3352   old->forward_to(old);
  3353   handle_evacuation_failure_common(old, m);
  3356 oop
  3357 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  3358                                                oop old) {
  3359   markOop m = old->mark();
  3360   oop forward_ptr = old->forward_to_atomic(old);
  3361   if (forward_ptr == NULL) {
  3362     // Forward-to-self succeeded.
  3363     if (_evac_failure_closure != cl) {
  3364       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  3365       assert(!_drain_in_progress,
  3366              "Should only be true while someone holds the lock.");
  3367       // Set the global evac-failure closure to the current thread's.
  3368       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  3369       set_evac_failure_closure(cl);
  3370       // Now do the common part.
  3371       handle_evacuation_failure_common(old, m);
  3372       // Reset to NULL.
  3373       set_evac_failure_closure(NULL);
  3374     } else {
  3375       // The lock is already held, and this is recursive.
  3376       assert(_drain_in_progress, "This should only be the recursive case.");
  3377       handle_evacuation_failure_common(old, m);
  3379     return old;
  3380   } else {
  3381     // Someone else had a place to copy it.
  3382     return forward_ptr;
  3386 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  3387   set_evacuation_failed(true);
  3389   preserve_mark_if_necessary(old, m);
  3391   HeapRegion* r = heap_region_containing(old);
  3392   if (!r->evacuation_failed()) {
  3393     r->set_evacuation_failed(true);
  3394     if (G1PrintRegions) {
  3395       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
  3396                           "["PTR_FORMAT","PTR_FORMAT")\n",
  3397                           r, r->bottom(), r->end());
  3401   push_on_evac_failure_scan_stack(old);
  3403   if (!_drain_in_progress) {
  3404     // prevent recursion in copy_to_survivor_space()
  3405     _drain_in_progress = true;
  3406     drain_evac_failure_scan_stack();
  3407     _drain_in_progress = false;
  3411 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  3412   if (m != markOopDesc::prototype()) {
  3413     if (_objs_with_preserved_marks == NULL) {
  3414       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  3415       _objs_with_preserved_marks =
  3416         new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3417       _preserved_marks_of_objs =
  3418         new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
  3420     _objs_with_preserved_marks->push(obj);
  3421     _preserved_marks_of_objs->push(m);
  3425 // *** Parallel G1 Evacuation
  3427 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  3428                                                   size_t word_size) {
  3429   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  3430   // let the caller handle alloc failure
  3431   if (alloc_region == NULL) return NULL;
  3433   HeapWord* block = alloc_region->par_allocate(word_size);
  3434   if (block == NULL) {
  3435     MutexLockerEx x(par_alloc_during_gc_lock(),
  3436                     Mutex::_no_safepoint_check_flag);
  3437     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  3439   return block;
  3442 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  3443                                             bool par) {
  3444   // Another thread might have obtained alloc_region for the given
  3445   // purpose, and might be attempting to allocate in it, and might
  3446   // succeed.  Therefore, we can't do the "finalization" stuff on the
  3447   // region below until we're sure the last allocation has happened.
  3448   // We ensure this by allocating the remaining space with a garbage
  3449   // object.
  3450   if (par) par_allocate_remaining_space(alloc_region);
  3451   // Now we can do the post-GC stuff on the region.
  3452   alloc_region->note_end_of_copying();
  3453   g1_policy()->record_after_bytes(alloc_region->used());
  3456 HeapWord*
  3457 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  3458                                          HeapRegion*    alloc_region,
  3459                                          bool           par,
  3460                                          size_t         word_size) {
  3461   HeapWord* block = NULL;
  3462   // In the parallel case, another thread may have obtained the lock
  3463   // first and already assigned a new gc_alloc_region.
  3464   if (alloc_region != _gc_alloc_regions[purpose]) {
  3465     assert(par, "But should only happen in parallel case.");
  3466     alloc_region = _gc_alloc_regions[purpose];
  3467     if (alloc_region == NULL) return NULL;
  3468     block = alloc_region->par_allocate(word_size);
  3469     if (block != NULL) return block;
  3470     // Otherwise, continue; this newly-installed region is full, too.
  3472   assert(alloc_region != NULL, "We better have an allocation region");
  3473   retire_alloc_region(alloc_region, par);
  3475   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  3476     // Cannot allocate more regions for the given purpose.
  3477     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
  3478     // Is there an alternative?
  3479     if (purpose != alt_purpose) {
  3480       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
  3481       // Check that the alternative region hasn't already been aliased.
  3482       if (alloc_region != alt_region && alt_region != NULL) {
  3483         // Try to allocate in the alternative region.
  3484         if (par) {
  3485           block = alt_region->par_allocate(word_size);
  3486         } else {
  3487           block = alt_region->allocate(word_size);
  3489         // Make an alias.
  3490         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  3491         if (block != NULL) {
  3492           return block;
  3494         retire_alloc_region(alt_region, par);
  3496       // Both the allocation region and the alternative one are full
  3497       // and aliased; replace them with a new allocation region.
  3498       purpose = alt_purpose;
  3499     } else {
  3500       set_gc_alloc_region(purpose, NULL);
  3501       return NULL;
  3505   // Now allocate a new region for allocation.
  3506   alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
  3508   // let the caller handle alloc failure
  3509   if (alloc_region != NULL) {
  3511     assert(check_gc_alloc_regions(), "alloc regions messed up");
  3512     assert(alloc_region->saved_mark_at_top(),
  3513            "Mark should have been saved already.");
  3514     // We used to assert that the region was zero-filled here, but no
  3515     // longer.
  3517     // This must be done last: once it's installed, other regions may
  3518     // allocate in it (without holding the lock.)
  3519     set_gc_alloc_region(purpose, alloc_region);
  3521     if (par) {
  3522       block = alloc_region->par_allocate(word_size);
  3523     } else {
  3524       block = alloc_region->allocate(word_size);
  3526     // Caller handles alloc failure.
  3527   } else {
  3528     // This also sets any aliases of the same old alloc region to NULL.
  3529     set_gc_alloc_region(purpose, NULL);
  3531   return block;  // May be NULL.
  3534 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
  3535   HeapWord* block = NULL;
  3536   size_t free_words;
  3537   do {
  3538     free_words = r->free()/HeapWordSize;
  3539     // If there's too little space, no one can allocate, so we're done.
  3540     if (free_words < (size_t)oopDesc::header_size()) return;
  3541     // Otherwise, try to claim it.
  3542     block = r->par_allocate(free_words);
  3543   } while (block == NULL);
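         // The loop can fail and retry: between reading free() and calling
         // par_allocate(), another thread may allocate from r, so a request
         // sized for the stale free_words no longer fits. We re-read the size
         // until the region is either claimed in full or too small to hold
         // even an object header.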
  3544   fill_with_object(block, free_words);
  3547 #ifndef PRODUCT
  3548 bool GCLabBitMapClosure::do_bit(size_t offset) {
  3549   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
  3550   guarantee(_cm->isMarked(oop(addr)), "it should be!");
  3551   return true;
  3553 #endif // PRODUCT
  3555 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
  3556   : _g1h(g1h),
  3557     _refs(g1h->task_queue(queue_num)),
  3558     _dcq(&g1h->dirty_card_queue_set()),
  3559     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  3560     _g1_rem(g1h->g1_rem_set()),
  3561     _hash_seed(17), _queue_num(queue_num),
  3562     _term_attempts(0),
  3563     _age_table(false),
  3564 #if G1_DETAILED_STATS
  3565     _pushes(0), _pops(0), _steals(0),
  3566     _steal_attempts(0),  _overflow_pushes(0),
  3567 #endif
  3568     _strong_roots_time(0), _term_time(0),
  3569     _alloc_buffer_waste(0), _undo_waste(0)
  3571   // we allocate one entry per region in the young cset, plus one, since
  3572   // we "sacrifice" entry 0 to keep track of surviving bytes for
  3573   // non-young regions (where the age is -1)
  3574   // We also add a few elements at the beginning and at the end in
  3575   // an attempt to eliminate cache contention
  3576   size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
  3577   size_t array_length = PADDING_ELEM_NUM +
  3578                         real_length +
  3579                         PADDING_ELEM_NUM;
  3580   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  3581   if (_surviving_young_words_base == NULL)
  3582     vm_exit_out_of_memory(array_length * sizeof(size_t),
  3583                           "Not enough space for young surv histo.");
  3584   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  3585   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
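           // The resulting layout is, schematically:
           //
           //   base: [ pad .. pad | idx 0 (non-young) | idx 1 .. idx cset_len | pad .. pad ]
           //                        ^-- _surviving_young_words = base + PADDING_ELEM_NUM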
  3587   _overflowed_refs = new OverflowQueue(10);
  3589   _start = os::elapsedTime();
  3592 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  3593   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  3594   _par_scan_state(par_scan_state) { }
  3596 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
  3597   // This is called _after_ do_oop_work has been called, hence after
  3598   // the object has been relocated to its new location and *p points
  3599   // to its new location.
  3601   T heap_oop = oopDesc::load_heap_oop(p);
  3602   if (!oopDesc::is_null(heap_oop)) {
  3603     oop obj = oopDesc::decode_heap_oop(heap_oop);
  3604     assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
  3605            "shouldn't still be in the CSet if evacuation didn't fail.");
  3606     HeapWord* addr = (HeapWord*)obj;
  3607     if (_g1->is_in_g1_reserved(addr))
  3608       _cm->grayRoot(oop(addr));
  3612 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
  3613   size_t    word_sz = old->size();
  3614   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  3615   // +1 to make the -1 indexes valid...
  3616   int       young_index = from_region->young_index_in_cset()+1;
  3617   assert( (from_region->is_young() && young_index > 0) ||
  3618           (!from_region->is_young() && young_index == 0), "invariant" );
  3619   G1CollectorPolicy* g1p = _g1->g1_policy();
  3620   markOop m = old->mark();
  3621   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  3622                                            : m->age();
  3623   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  3624                                                              word_sz);
  3625   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  3626   oop       obj     = oop(obj_ptr);
  3628   if (obj_ptr == NULL) {
  3629     // This will either forward-to-self, or detect that someone else has
  3630     // installed a forwarding pointer.
  3631     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  3632     return _g1->handle_evacuation_failure_par(cl, old);
  3635   // We're going to allocate linearly, so might as well prefetch ahead.
  3636   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  3638   oop forward_ptr = old->forward_to_atomic(obj);
  3639   if (forward_ptr == NULL) {
  3640     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  3641     if (g1p->track_object_age(alloc_purpose)) {
  3642       // We could simply do obj->incr_age(). However, this causes a
  3643       // performance issue. obj->incr_age() will first check whether
  3644       // the object has a displaced mark by checking its mark word;
  3645       // getting the mark word from the new location of the object
  3646       // stalls. So, given that we already have the mark word and we
  3647       // are about to install it anyway, it's better to increase the
  3648       // age on the mark word, when the object does not have a
  3649       // displaced mark word. We're not expecting many objects to have
  3650       // a displaced mark word, so that case is not optimized
  3651       // further (it could be...) and we simply call obj->incr_age().
  3653       if (m->has_displaced_mark_helper()) {
  3654         // in this case, we have to install the mark word first,
  3655         // otherwise obj looks to be forwarded (the old mark word,
  3656         // which contains the forward pointer, was copied)
  3657         obj->set_mark(m);
  3658         obj->incr_age();
  3659       } else {
  3660         m = m->incr_age();
  3661         obj->set_mark(m);
  3663       _par_scan_state->age_table()->add(obj, word_sz);
  3664     } else {
  3665       obj->set_mark(m);
  3668     // preserve "next" mark bit
  3669     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  3670       if (!use_local_bitmaps ||
  3671           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
  3672         // if we couldn't mark it on the local bitmap (this happens when
  3673         // the object was not allocated in the GCLab), we have to bite
  3674         // the bullet and do the standard parallel mark
  3675         _cm->markAndGrayObjectIfNecessary(obj);
  3677 #if 1
  3678       if (_g1->isMarkedNext(old)) {
  3679         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
  3681 #endif
  3684     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  3685     surv_young_words[young_index] += word_sz;
  3687     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  3688       arrayOop(old)->set_length(0);
  3689       oop* old_p = set_partial_array_mask(old);
  3690       _par_scan_state->push_on_queue(old_p);
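             // (set_partial_array_mask() tags the queued pointer so the
             // scanning side can tell a partially-scanned array task from an
             // ordinary reference; see G1ParScanPartialArrayClosure::do_oop_nv().)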
  3691     } else {
  3692       // No point in using the slower heap_region_containing() method,
  3693       // given that we know obj is in the heap.
  3694       _scanner->set_region(_g1->heap_region_containing_raw(obj));
  3695       obj->oop_iterate_backwards(_scanner);
  3697   } else {
  3698     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  3699     obj = forward_ptr;
  3701   return obj;
  3704 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
  3705 template <class T>
  3706 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
  3707 ::do_oop_work(T* p) {
  3708   oop obj = oopDesc::load_decode_heap_oop(p);
  3709   assert(barrier != G1BarrierRS || obj != NULL,
  3710          "Precondition: G1BarrierRS implies obj is nonNull");
  3712   // The only time we skip the cset test is when we're scanning
  3713   // references popped from the queue. And we only push on the queue
  3714   // references that we know point into the cset, so no point in
  3715   // checking again. But we'll leave an assert here for peace of mind.
  3716   assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
  3718   // here the null check is implicit in the cset_fast_test() test
  3719   if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
  3720 #if G1_REM_SET_LOGGING
  3721     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
  3722                            "into CS.", p, (void*) obj);
  3723 #endif
  3724     if (obj->is_forwarded()) {
  3725       oopDesc::encode_store_heap_oop(p, obj->forwardee());
  3726     } else {
  3727       oop copy_oop = copy_to_survivor_space(obj);
  3728       oopDesc::encode_store_heap_oop(p, copy_oop);
  3730     // When scanning the RS, we only care about objs in CS.
  3731     if (barrier == G1BarrierRS) {
  3732       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  3736   // When scanning moved objs, must look at all oops.
  3737   if (barrier == G1BarrierEvac && obj != NULL) {
  3738     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  3741   if (do_gen_barrier && obj != NULL) {
  3742     par_do_barrier(p);
  3746 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
  3747 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
  3749 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  3750   assert(has_partial_array_mask(p), "invariant");
  3751   oop old = clear_partial_array_mask(p);
  3752   assert(old->is_objArray(), "must be obj array");
  3753   assert(old->is_forwarded(), "must be forwarded");
  3754   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  3756   objArrayOop obj = objArrayOop(old->forwardee());
  3757   assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  3758   // Process ParGCArrayScanChunk elements now
  3759   // and push the remainder back onto queue
  3760   int start     = arrayOop(old)->length();
  3761   int end       = obj->length();
  3762   int remainder = end - start;
  3763   assert(start <= end, "just checking");
  3764   if (remainder > 2 * ParGCArrayScanChunk) {
  3765     // Test above combines last partial chunk with a full chunk
  3766     end = start + ParGCArrayScanChunk;
  3767     arrayOop(old)->set_length(end);
  3768     // Push remainder.
  3769     oop* old_p = set_partial_array_mask(old);
  3770     assert(arrayOop(old)->length() < obj->length(), "Empty push?");
  3771     _par_scan_state->push_on_queue(old_p);
  3772   } else {
  3773     // Restore length so that the heap remains parsable in
  3774     // case of evacuation failure.
  3775     arrayOop(old)->set_length(end);
  3777   _scanner.set_region(_g1->heap_region_containing_raw(obj));
  3778   // process our set of indices (include header in first chunk)
  3779   obj->oop_iterate_range(&_scanner, start, end);
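         // Worked example (illustrative numbers): with ParGCArrayScanChunk == 50
         // and a 1000-element array, the first execution sees start == 0 (the
         // from-space copy's length was zeroed when the array was pushed),
         // remainder == 1000 > 100, so it scans elements [0, 50), records
         // progress by setting the from-space length to 50, and re-pushes the
         // task. Each later execution scans 50 more elements, until remainder
         // drops to <= 2 * ParGCArrayScanChunk and the final execution scans
         // the rest in one go, restoring the true length.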
  3782 class G1ParEvacuateFollowersClosure : public VoidClosure {
  3783 protected:
  3784   G1CollectedHeap*              _g1h;
  3785   G1ParScanThreadState*         _par_scan_state;
  3786   RefToScanQueueSet*            _queues;
  3787   ParallelTaskTerminator*       _terminator;
  3789   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  3790   RefToScanQueueSet*      queues()         { return _queues; }
  3791   ParallelTaskTerminator* terminator()     { return _terminator; }
  3793 public:
  3794   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  3795                                 G1ParScanThreadState* par_scan_state,
  3796                                 RefToScanQueueSet* queues,
  3797                                 ParallelTaskTerminator* terminator)
  3798     : _g1h(g1h), _par_scan_state(par_scan_state),
  3799       _queues(queues), _terminator(terminator) {}
  3801   void do_void() {
  3802     G1ParScanThreadState* pss = par_scan_state();
  3803     while (true) {
  3804       pss->trim_queue();
  3805       IF_G1_DETAILED_STATS(pss->note_steal_attempt());
  3807       StarTask stolen_task;
  3808       if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  3809         IF_G1_DETAILED_STATS(pss->note_steal());
  3811         // slightly paranoid tests; I'm trying to catch potential
  3812         // problems before we go into push_on_queue to know where the
  3813         // problem is coming from
  3814         assert((oop*)stolen_task != NULL, "Error");
  3815         if (stolen_task.is_narrow()) {
  3816           assert(UseCompressedOops, "Error");
  3817           narrowOop* p = (narrowOop*) stolen_task;
  3818           assert(has_partial_array_mask(p) ||
  3819                  _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
  3820           pss->push_on_queue(p);
  3821         } else {
  3822           oop* p = (oop*) stolen_task;
  3823           assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
  3824           pss->push_on_queue(p);
  3826         continue;
  3828       pss->start_term_time();
  3829       if (terminator()->offer_termination()) break;
  3830       pss->end_term_time();
  3832     pss->end_term_time();
  3833     pss->retire_alloc_buffers();
  3835 };
  3837 class G1ParTask : public AbstractGangTask {
  3838 protected:
  3839   G1CollectedHeap*       _g1h;
  3840   RefToScanQueueSet      *_queues;
  3841   ParallelTaskTerminator _terminator;
  3842   int _n_workers;
  3844   Mutex _stats_lock;
  3845   Mutex* stats_lock() { return &_stats_lock; }
  3847   size_t getNCards() {
  3848     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  3849       / G1BlockOffsetSharedArray::N_bytes;
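           // (A round-up division: a final, partially covered block still
           // needs its own card.)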
  3852 public:
  3853   G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
  3854     : AbstractGangTask("G1 collection"),
  3855       _g1h(g1h),
  3856       _queues(task_queues),
  3857       _terminator(workers, _queues),
  3858       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
  3859       _n_workers(workers)
  3860   {}
  3862   RefToScanQueueSet* queues() { return _queues; }
  3864   RefToScanQueue *work_queue(int i) {
  3865     return queues()->queue(i);
  3868   void work(int i) {
  3869     if (i >= _n_workers) return;  // no work needed this round
  3870     ResourceMark rm;
  3871     HandleMark   hm;
  3873     G1ParScanThreadState            pss(_g1h, i);
  3874     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  3875     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  3876     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  3878     pss.set_evac_closure(&scan_evac_cl);
  3879     pss.set_evac_failure_closure(&evac_failure_cl);
  3880     pss.set_partial_scan_closure(&partial_scan_cl);
  3882     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  3883     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  3884     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  3886     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  3887     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  3888     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  3890     OopsInHeapRegionClosure        *scan_root_cl;
  3891     OopsInHeapRegionClosure        *scan_perm_cl;
  3892     OopsInHeapRegionClosure        *scan_so_cl;
  3894     if (_g1h->g1_policy()->should_initiate_conc_mark()) {
  3895       scan_root_cl = &scan_mark_root_cl;
  3896       scan_perm_cl = &scan_mark_perm_cl;
  3897       scan_so_cl   = &scan_mark_heap_rs_cl;
  3898     } else {
  3899       scan_root_cl = &only_scan_root_cl;
  3900       scan_perm_cl = &only_scan_perm_cl;
  3901       scan_so_cl   = &only_scan_heap_rs_cl;
  3904     pss.start_strong_roots();
  3905     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  3906                                   SharedHeap::SO_AllClasses,
  3907                                   scan_root_cl,
  3908                                   &only_scan_heap_rs_cl,
  3909                                   scan_so_cl,
  3910                                   scan_perm_cl,
  3911                                   i);
  3912     pss.end_strong_roots();
  3914       double start = os::elapsedTime();
  3915       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  3916       evac.do_void();
  3917       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  3918       double term_ms = pss.term_time()*1000.0;
  3919       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  3920       _g1h->g1_policy()->record_termination_time(i, term_ms);
  3922     if (G1UseSurvivorSpaces) {
  3923       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  3925     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  3927     // Clean up any par-expanded rem sets.
  3928     HeapRegionRemSet::par_cleanup();
  3930     MutexLocker x(stats_lock());
  3931     if (ParallelGCVerbose) {
  3932       gclog_or_tty->print("Thread %d complete:\n", i);
  3933 #if G1_DETAILED_STATS
  3934       gclog_or_tty->print("  Pushes: %7d    Pops: %7d   Overflows: %7d   Steals %7d (in %d attempts)\n",
  3935                           pss.pushes(),
  3936                           pss.pops(),
  3937                           pss.overflow_pushes(),
  3938                           pss.steals(),
  3939                           pss.steal_attempts());
  3940 #endif
  3941       double elapsed      = pss.elapsed();
  3942       double strong_roots = pss.strong_roots_time();
  3943       double term         = pss.term_time();
  3944       gclog_or_tty->print("  Elapsed: %7.2f ms.\n"
  3945                           "    Strong roots: %7.2f ms (%6.2f%%)\n"
  3946                           "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
  3947                           elapsed * 1000.0,
  3948                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
  3949                           term * 1000.0, (term*100.0/elapsed),
  3950                           pss.term_attempts());
  3951       size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
  3952       gclog_or_tty->print("  Waste: %8dK\n"
  3953                  "    Alloc Buffer: %8dK\n"
  3954                  "    Undo: %8dK\n",
  3955                  (total_waste * HeapWordSize) / K,
  3956                  (pss.alloc_buffer_waste() * HeapWordSize) / K,
  3957                  (pss.undo_waste() * HeapWordSize) / K);
  3960     assert(pss.refs_to_scan() == 0, "Task queue should be empty");
  3961     assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
  3963 };
  3965 // *** Common G1 Evacuation Stuff
  3967 void
  3968 G1CollectedHeap::
  3969 g1_process_strong_roots(bool collecting_perm_gen,
  3970                         SharedHeap::ScanningOption so,
  3971                         OopClosure* scan_non_heap_roots,
  3972                         OopsInHeapRegionClosure* scan_rs,
  3973                         OopsInHeapRegionClosure* scan_so,
  3974                         OopsInGenClosure* scan_perm,
  3975                         int worker_i) {
  3976   // First scan the strong roots, including the perm gen.
  3977   double ext_roots_start = os::elapsedTime();
  3978   double closure_app_time_sec = 0.0;
  3980   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  3981   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  3982   buf_scan_perm.set_generation(perm_gen());
  3984   process_strong_roots(collecting_perm_gen, so,
  3985                        &buf_scan_non_heap_roots,
  3986                        &buf_scan_perm);
  3987   // Finish up any enqueued closure apps.
  3988   buf_scan_non_heap_roots.done();
  3989   buf_scan_perm.done();
  3990   double ext_roots_end = os::elapsedTime();
  3991   g1_policy()->reset_obj_copy_time(worker_i);
  3992   double obj_copy_time_sec =
  3993     buf_scan_non_heap_roots.closure_app_seconds() +
  3994     buf_scan_perm.closure_app_seconds();
  3995   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  3996   double ext_root_time_ms =
  3997     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  3998   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
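         // The external-root time is thus the wall time of the strong-root scan
         // minus the time spent applying the buffered closures, which has
         // already been attributed to object copying above.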
  4000   // Scan strong roots in mark stack.
  4001   if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
  4002     concurrent_mark()->oops_do(scan_non_heap_roots);
  4004   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  4005   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
  4007   // XXX What should this be doing in the parallel case?
  4008   g1_policy()->record_collection_pause_end_CH_strong_roots();
  4009   if (scan_so != NULL) {
  4010     scan_scan_only_set(scan_so, worker_i);
  4012   // Now scan the complement of the collection set.
  4013   if (scan_rs != NULL) {
  4014     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  4016   // Finish with the ref_processor roots.
  4017   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4018     ref_processor()->oops_do(scan_non_heap_roots);
  4020   g1_policy()->record_collection_pause_end_G1_strong_roots();
  4021   _process_strong_tasks->all_tasks_completed();
  4024 void
  4025 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
  4026                                        OopsInHeapRegionClosure* oc,
  4027                                        int worker_i) {
  4031   oc->set_region(r);
  4033   HeapWord* p = r->bottom();
  4034   HeapWord* t = r->top();
  4035   guarantee( p == r->next_top_at_mark_start(), "invariant" );
  4036   while (p < t) {
  4037     oop obj = oop(p);
  4038     p += obj->oop_iterate(oc);
  4042 void
  4043 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
  4044                                     int worker_i) {
  4045   double start = os::elapsedTime();
  4047   BufferingOopsInHeapRegionClosure boc(oc);
  4049   FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
  4050   FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
  4052   OopsInHeapRegionClosure *foc;
  4053   if (g1_policy()->should_initiate_conc_mark())
  4054     foc = &scan_and_mark;
  4055   else
  4056     foc = &scan_only;
  4058   HeapRegion* hr;
  4059   int n = 0;
  4060   while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
  4061     scan_scan_only_region(hr, foc, worker_i);
  4062     ++n;
  4064   boc.done();
  4066   double closure_app_s = boc.closure_app_seconds();
  4067   g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
  4068   double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
  4069   g1_policy()->record_scan_only_time(worker_i, ms, n);
  4072 void
  4073 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  4074                                        OopClosure* non_root_closure) {
  4075   SharedHeap::process_weak_roots(root_closure, non_root_closure);
  4079 class SaveMarksClosure: public HeapRegionClosure {
  4080 public:
  4081   bool doHeapRegion(HeapRegion* r) {
  4082     r->save_marks();
  4083     return false;
  4085 };
  4087 void G1CollectedHeap::save_marks() {
  4088   if (ParallelGCThreads == 0) {
  4089     SaveMarksClosure sm;
  4090     heap_region_iterate(&sm);
  4092   // We do this even in the parallel case
  4093   perm_gen()->save_marks();
  4096 void G1CollectedHeap::evacuate_collection_set() {
  4097   set_evacuation_failed(false);
  4099   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4100   concurrent_g1_refine()->set_use_cache(false);
  4101   concurrent_g1_refine()->clear_hot_cache_claimed_index();
  4103   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  4104   set_par_threads(n_workers);
  4105   G1ParTask g1_par_task(this, n_workers, _task_queues);
  4107   init_for_evac_failure(NULL);
  4109   change_strong_roots_parity();  // In preparation for parallel strong roots.
  4110   rem_set()->prepare_for_younger_refs_iterate(true);
  4112   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4113   double start_par = os::elapsedTime();
  4114   if (ParallelGCThreads > 0) {
  4115     // The individual threads will set their evac-failure closures.
  4116     workers()->run_task(&g1_par_task);
  4117   } else {
  4118     g1_par_task.work(0);
  4121   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4122   g1_policy()->record_par_time(par_time);
  4123   set_par_threads(0);
  4124   // Is this the right thing to do here?  We don't save marks
  4125   // on individual heap regions when we allocate from
  4126   // them in parallel, so this seems like the correct place for this.
  4127   retire_all_alloc_regions();
  4129     G1IsAliveClosure is_alive(this);
  4130     G1KeepAliveClosure keep_alive(this);
  4131     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4133   release_gc_alloc_regions(false /* totally */);
  4134   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4136   concurrent_g1_refine()->clear_hot_cache();
  4137   concurrent_g1_refine()->set_use_cache(true);
  4139   finalize_for_evac_failure();
  4141   // Must do this before removing self-forwarding pointers, which clears
  4142   // the per-region evac-failure flags.
  4143   concurrent_mark()->complete_marking_in_collection_set();
  4145   if (evacuation_failed()) {
  4146     remove_self_forwarding_pointers();
  4147     if (PrintGCDetails) {
  4148       gclog_or_tty->print(" (evacuation failed)");
  4149     } else if (PrintGC) {
  4150       gclog_or_tty->print("--");
  4154   if (G1DeferredRSUpdate) {
  4155     RedirtyLoggedCardTableEntryFastClosure redirty;
  4156     dirty_card_queue_set().set_closure(&redirty);
  4157     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4158     JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
  4159     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4162   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  4165 void G1CollectedHeap::free_region(HeapRegion* hr) {
  4166   size_t pre_used = 0;
  4167   size_t cleared_h_regions = 0;
  4168   size_t freed_regions = 0;
  4169   UncleanRegionList local_list;
  4171   HeapWord* start = hr->bottom();
  4172   HeapWord* end   = hr->prev_top_at_mark_start();
  4173   size_t used_bytes = hr->used();
  4174   size_t live_bytes = hr->max_live_bytes();
  4175   if (used_bytes > 0) {
  4176     guarantee( live_bytes <= used_bytes, "invariant" );
  4177   } else {
  4178     guarantee( live_bytes == 0, "invariant" );
  4181   size_t garbage_bytes = used_bytes - live_bytes;
  4182   if (garbage_bytes > 0)
  4183     g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
  4185   free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
  4186                    &local_list);
  4187   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  4188                           &local_list);
  4191 void
  4192 G1CollectedHeap::free_region_work(HeapRegion* hr,
  4193                                   size_t& pre_used,
  4194                                   size_t& cleared_h_regions,
  4195                                   size_t& freed_regions,
  4196                                   UncleanRegionList* list,
  4197                                   bool par) {
  4198   pre_used += hr->used();
  4199   if (hr->isHumongous()) {
  4200     assert(hr->startsHumongous(),
  4201            "Only the start of a humongous region should be freed.");
  4202     int ind = _hrs->find(hr);
  4203     assert(ind != -1, "Should have an index.");
  4204     // Clear the start region.
  4205     hr->hr_clear(par, true /*clear_space*/);
  4206     list->insert_before_head(hr);
  4207     cleared_h_regions++;
  4208     freed_regions++;
  4209     // Clear any continued regions.
  4210     ind++;
  4211     while ((size_t)ind < n_regions()) {
  4212       HeapRegion* hrc = _hrs->at(ind);
  4213       if (!hrc->continuesHumongous()) break;
  4214       // Otherwise, it continues the H region started by hr.
  4215       assert(hrc->humongous_start_region() == hr, "Huh?");
  4216       hrc->hr_clear(par, true /*clear_space*/);
  4217       cleared_h_regions++;
  4218       freed_regions++;
  4219       list->insert_before_head(hrc);
  4220       ind++;
  4222   } else {
  4223     hr->hr_clear(par, true /*clear_space*/);
  4224     list->insert_before_head(hr);
  4225     freed_regions++;
  4226     // If we're using clear2, this should not be enabled.
  4227     // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
  4231 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
  4232                                               size_t cleared_h_regions,
  4233                                               size_t freed_regions,
  4234                                               UncleanRegionList* list) {
  4235   if (list != NULL && list->sz() > 0) {
  4236     prepend_region_list_on_unclean_list(list);
  4238   // Acquire a lock, if we're parallel, to update possibly-shared
  4239   // variables.
  4240   Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
  4242     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  4243     _summary_bytes_used -= pre_used;
  4244     _num_humongous_regions -= (int) cleared_h_regions;
  4245     _free_regions += freed_regions;
  4250 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  4251   while (list != NULL) {
  4252     guarantee( list->is_young(), "invariant" );
  4254     HeapWord* bottom = list->bottom();
  4255     HeapWord* end = list->end();
  4256     MemRegion mr(bottom, end);
  4257     ct_bs->dirty(mr);
  4259     list = list->get_next_young_region();
  4264 class G1ParCleanupCTTask : public AbstractGangTask {
  4265   CardTableModRefBS* _ct_bs;
  4266   G1CollectedHeap* _g1h;
  4267   HeapRegion* volatile _so_head;
  4268   HeapRegion* volatile _su_head;
  4269 public:
  4270   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  4271                      G1CollectedHeap* g1h,
  4272                      HeapRegion* scan_only_list,
  4273                      HeapRegion* survivor_list) :
  4274     AbstractGangTask("G1 Par Cleanup CT Task"),
  4275     _ct_bs(ct_bs),
  4276     _g1h(g1h),
  4277     _so_head(scan_only_list),
  4278     _su_head(survivor_list)
  4279   { }
  4281   void work(int i) {
  4282     HeapRegion* r;
  4283     while (r = _g1h->pop_dirty_cards_region()) {
  4284       clear_cards(r);
  4285     }
  4286     // Redirty the cards of the scan-only and survivor regions.
  4287     dirty_list(&this->_so_head);
  4288     dirty_list(&this->_su_head);
  4289   }
  4291   void clear_cards(HeapRegion* r) {
  4292     // Cards for Survivor and Scan-Only regions will be dirtied later.
  4293     if (!r->is_scan_only() && !r->is_survivor()) {
  4294       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  4295     }
  4296   }
  4298   void dirty_list(HeapRegion* volatile * head_ptr) {
  4299     HeapRegion* head;
  4300     do {
  4301       // Pop region off the list.
  4302       head = *head_ptr;
  4303       if (head != NULL) {
  4304         HeapRegion* r = (HeapRegion*)
  4305           Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
  4306         if (r == head) {
  4307           assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
  4308           _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
  4309         }
  4310       }
  4311     } while (*head_ptr != NULL);
  4312   }
  4313 };
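// Illustrative only: dirty_list() above pops a shared singly linked list
// with a CAS instead of a lock, so all workers can drain the scan-only and
// survivor lists concurrently.  The same pattern in isolation, with a
// hypothetical node type:
#if 0
struct Node { Node* next; };

Node* lock_free_pop(Node* volatile * head_ptr) {
  while (true) {
    Node* head = *head_ptr;
    if (head == NULL) return NULL;  // list drained
    // Only the thread whose CAS succeeds owns 'head'; everyone else
    // observes the new head and retries.
    if (Atomic::cmpxchg_ptr(head->next, head_ptr, head) == head) {
      return head;
    }
  }
}
#endif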
  4316 #ifndef PRODUCT
  4317 class G1VerifyCardTableCleanup: public HeapRegionClosure {
  4318   CardTableModRefBS* _ct_bs;
  4319 public:
  4320   G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
  4321     : _ct_bs(ct_bs)
  4322   { }
  4323   virtual bool doHeapRegion(HeapRegion* r)
  4324   {
  4325     MemRegion mr(r->bottom(), r->end());
  4326     if (r->is_scan_only() || r->is_survivor()) {
  4327       _ct_bs->verify_dirty_region(mr);
  4328     } else {
  4329       _ct_bs->verify_clean_region(mr);
  4330     }
  4331     return false;
  4332   }
  4333 };
  4334 #endif
  4336 void G1CollectedHeap::cleanUpCardTable() {
  4337   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  4338   double start = os::elapsedTime();
  4340   // Iterate over the dirty cards region list.
  4341   G1ParCleanupCTTask cleanup_task(ct_bs, this,
  4342                                   _young_list->first_scan_only_region(),
  4343                                   _young_list->first_survivor_region());
  4344   if (ParallelGCThreads > 0) {
  4345     set_par_threads(workers()->total_workers());
  4346     workers()->run_task(&cleanup_task);
  4347     set_par_threads(0);
  4348   } else {
  4349     while (_dirty_cards_region_list) {
  4350       HeapRegion* r = _dirty_cards_region_list;
  4351       cleanup_task.clear_cards(r);
  4352       _dirty_cards_region_list = r->get_next_dirty_cards_region();
  4353       if (_dirty_cards_region_list == r) {
  4354         // The last region.
  4355         _dirty_cards_region_list = NULL;
  4356       }
  4357       r->set_next_dirty_cards_region(NULL);
  4358     }
  4359     // now, redirty the cards of the scan-only and survivor regions
  4360     // (it seemed faster to do it this way, instead of iterating over
  4361     // all regions and then clearing / dirtying as appropriate)
  4362     dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
  4363     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  4365   double elapsed = os::elapsedTime() - start;
  4366   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  4367 #ifndef PRODUCT
  4368   if (G1VerifyCTCleanup || VerifyAfterGC) {
  4369     G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
  4370     heap_region_iterate(&cleanup_verifier);
  4371   }
  4372 #endif
  4373 }
  4375 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
  4376   if (g1_policy()->should_do_collection_pause(word_size)) {
  4377     do_collection_pause();
  4378   }
  4379 }
  4381 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  4382   double young_time_ms     = 0.0;
  4383   double non_young_time_ms = 0.0;
  4385   G1CollectorPolicy* policy = g1_policy();
  4387   double start_sec = os::elapsedTime();
  4388   bool non_young = true;
  4390   HeapRegion* cur = cs_head;
  4391   int age_bound = -1;
  4392   size_t rs_lengths = 0;
  4394   while (cur != NULL) {
  4395     if (non_young) {
  4396       if (cur->is_young()) {
  4397         double end_sec = os::elapsedTime();
  4398         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4399         non_young_time_ms += elapsed_ms;
  4401         start_sec = os::elapsedTime();
  4402         non_young = false;
  4403       }
  4404     } else {
  4405       if (!cur->is_on_free_list()) {
  4406         double end_sec = os::elapsedTime();
  4407         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4408         young_time_ms += elapsed_ms;
  4410         start_sec = os::elapsedTime();
  4411         non_young = true;
  4412       }
  4413     }
  4415     rs_lengths += cur->rem_set()->occupied();
  4417     HeapRegion* next = cur->next_in_collection_set();
  4418     assert(cur->in_collection_set(), "bad CS");
  4419     cur->set_next_in_collection_set(NULL);
  4420     cur->set_in_collection_set(false);
  4422     if (cur->is_young()) {
  4423       int index = cur->young_index_in_cset();
  4424       guarantee( index != -1, "invariant" );
  4425       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
  4426       size_t words_survived = _surviving_young_words[index];
  4427       cur->record_surv_words_in_group(words_survived);
  4428     } else {
  4429       int index = cur->young_index_in_cset();
  4430       guarantee( index == -1, "invariant" );
  4431     }
  4433     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  4434             (!cur->is_young() && cur->young_index_in_cset() == -1),
  4435             "invariant" );
  4437     if (!cur->evacuation_failed()) {
  4438       // The region should not be empty at this point.
  4439       assert(!cur->is_empty(),
  4440              "Should not have empty regions in a CS.");
  4441       free_region(cur);
  4442     } else {
  4443       guarantee( !cur->is_scan_only(), "should not be scan only" );
  4444       cur->uninstall_surv_rate_group();
  4445       if (cur->is_young())
  4446         cur->set_young_index_in_cset(-1);
  4447       cur->set_not_young();
  4448       cur->set_evacuation_failed(false);
  4449     }
  4450     cur = next;
  4451   }
  4453   policy->record_max_rs_lengths(rs_lengths);
  4454   policy->cset_regions_freed();
  4456   double end_sec = os::elapsedTime();
  4457   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4458   if (non_young)
  4459     non_young_time_ms += elapsed_ms;
  4460   else
  4461     young_time_ms += elapsed_ms;
  4463   policy->record_young_free_cset_time_ms(young_time_ms);
  4464   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
  4465 }
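// Illustrative only: the collection set is ordered with non-young regions
// first, so the loop above times contiguous segments and flips between the
// two accumulators at each boundary.  The intent, in compressed form
// (segment detection simplified here to is_young()):
#if 0
double young_ms = 0.0, non_young_ms = 0.0;
double seg_start = os::elapsedTime();
bool in_non_young = true;
for (HeapRegion* r = cs_head; r != NULL; r = r->next_in_collection_set()) {
  if (in_non_young == r->is_young()) {  // segment boundary
    double now = os::elapsedTime();
    (in_non_young ? non_young_ms : young_ms) += (now - seg_start) * 1000.0;
    seg_start = now;
    in_non_young = !in_non_young;
  }
  // ... per-region freeing work ...
}
#endif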
  4467 HeapRegion*
  4468 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  4469   assert(ZF_mon->owned_by_self(), "Precondition");
  4470   HeapRegion* res = pop_unclean_region_list_locked();
  4471   if (res != NULL) {
  4472     assert(!res->continuesHumongous() &&
  4473            res->zero_fill_state() != HeapRegion::Allocated,
  4474            "Only free regions on unclean list.");
  4475     if (zero_filled) {
  4476       res->ensure_zero_filled_locked();
  4477       res->set_zero_fill_allocated();
  4478     }
  4479   }
  4480   return res;
  4481 }
  4483 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
  4484   MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
  4485   return alloc_region_from_unclean_list_locked(zero_filled);
  4486 }
  4488 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
  4489   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4490   put_region_on_unclean_list_locked(r);
  4491   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  4492 }
  4494 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
  4495   MutexLockerEx x(Cleanup_mon);
  4496   set_unclean_regions_coming_locked(b);
  4497 }
  4499 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  4500   assert(Cleanup_mon->owned_by_self(), "Precondition");
  4501   _unclean_regions_coming = b;
  4502   // Wake up mutator threads that might be waiting for completeCleanup to
  4503   // finish.
  4504   if (!b) Cleanup_mon->notify_all();
  4505 }
  4507 void G1CollectedHeap::wait_for_cleanup_complete() {
  4508   MutexLockerEx x(Cleanup_mon);
  4509   wait_for_cleanup_complete_locked();
  4510 }
  4512 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  4513   assert(Cleanup_mon->owned_by_self(), "precondition");
  4514   while (_unclean_regions_coming) {
  4515     Cleanup_mon->wait();
  4516   }
  4517 }
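// Illustrative only: the routines above form a simple monitor handshake on
// Cleanup_mon.  A hypothetical driver, sketching the intended call order:
#if 0
// Concurrent cleanup side:
g1h->set_unclean_regions_coming(true);   // announce regions will arrive
// ... feed reclaimed regions onto the unclean list ...
g1h->set_unclean_regions_coming(false);  // notify_all() wakes waiters

// Mutator side, before relying on a stable region population:
g1h->wait_for_cleanup_complete();        // blocks while the flag is true
#endif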
  4519 void
  4520 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  4521   assert(ZF_mon->owned_by_self(), "precondition.");
  4522   _unclean_region_list.insert_before_head(r);
  4523 }
  4525 void
  4526 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
  4527   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4528   prepend_region_list_on_unclean_list_locked(list);
  4529   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  4530 }
  4532 void
  4533 G1CollectedHeap::
  4534 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  4535   assert(ZF_mon->owned_by_self(), "precondition.");
  4536   _unclean_region_list.prepend_list(list);
  4537 }
  4539 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
  4540   assert(ZF_mon->owned_by_self(), "precondition.");
  4541   HeapRegion* res = _unclean_region_list.pop();
  4542   if (res != NULL) {
  4543     // Inform ZF thread that there's a new unclean head.
  4544     if (_unclean_region_list.hd() != NULL && should_zf())
  4545       ZF_mon->notify_all();
  4546   }
  4547   return res;
  4548 }
  4550 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  4551   assert(ZF_mon->owned_by_self(), "precondition.");
  4552   return _unclean_region_list.hd();
  4553 }
  4556 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
  4557   assert(ZF_mon->owned_by_self(), "Precondition");
  4558   HeapRegion* r = peek_unclean_region_list_locked();
  4559   if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
  4560     // Result of below must be equal to "r", since we hold the lock.
  4561     (void)pop_unclean_region_list_locked();
  4562     put_free_region_on_list_locked(r);
  4563     return true;
  4564   } else {
  4565     return false;
  4566   }
  4567 }
  4569 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  4570   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4571   return move_cleaned_region_to_free_list_locked();
  4572 }
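// Illustrative only: a hypothetical zero-fill thread loop assembled from
// the primitives above -- sleep on ZF_mon until work appears, zero the
// head of the unclean list, then migrate ZeroFilled heads to the free list:
#if 0
void zf_thread_loop(G1CollectedHeap* g1h) {
  MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  while (true) {
    HeapRegion* r;
    while ((r = g1h->peek_unclean_region_list_locked()) == NULL ||
           !g1h->should_zf()) {
      ZF_mon->wait(Mutex::_no_safepoint_check_flag);
    }
    r->ensure_zero_filled_locked();  // may temporarily release ZF_mon
    while (g1h->move_cleaned_region_to_free_list_locked()) {
      // keep draining while the head is ZeroFilled
    }
  }
}
#endif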
  4575 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  4576   assert(ZF_mon->owned_by_self(), "precondition.");
  4577   assert(_free_region_list_size == free_region_list_length(), "Inv");
  4578   assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
  4579         "Regions on free list must be zero filled");
  4580   assert(!r->isHumongous(), "Must not be humongous.");
  4581   assert(r->is_empty(), "Better be empty");
  4582   assert(!r->is_on_free_list(),
  4583          "Better not already be on free list");
  4584   assert(!r->is_on_unclean_list(),
  4585          "Better not already be on unclean list");
  4586   r->set_on_free_list(true);
  4587   r->set_next_on_free_list(_free_region_list);
  4588   _free_region_list = r;
  4589   _free_region_list_size++;
  4590   assert(_free_region_list_size == free_region_list_length(), "Inv");
  4591 }
  4593 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  4594   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4595   put_free_region_on_list_locked(r);
  4596 }
  4598 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  4599   assert(ZF_mon->owned_by_self(), "precondition.");
  4600   assert(_free_region_list_size == free_region_list_length(), "Inv");
  4601   HeapRegion* res = _free_region_list;
  4602   if (res != NULL) {
  4603     _free_region_list = res->next_from_free_list();
  4604     _free_region_list_size--;
  4605     res->set_on_free_list(false);
  4606     res->set_next_on_free_list(NULL);
  4607     assert(_free_region_list_size == free_region_list_length(), "Inv");
  4608   }
  4609   return res;
  4610 }
  4613 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  4614   // By self, or on behalf of self.
  4615   assert(Heap_lock->is_locked(), "Precondition");
  4616   HeapRegion* res = NULL;
  4617   bool first = true;
  4618   while (res == NULL) {
  4619     if (zero_filled || !first) {
  4620       MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4621       res = pop_free_region_list_locked();
  4622       if (res != NULL) {
  4623         assert(!res->zero_fill_is_allocated(),
  4624                "No allocated regions on free list.");
  4625         res->set_zero_fill_allocated();
  4626       } else if (!first) {
  4627         break;  // We tried both, time to return NULL.
  4628       }
  4629     }
  4631     if (res == NULL) {
  4632       res = alloc_region_from_unclean_list(zero_filled);
  4633     }
  4634     assert(res == NULL ||
  4635            !zero_filled ||
  4636            res->zero_fill_is_allocated(),
  4637            "We must have allocated the region we're returning");
  4638     first = false;
  4639   }
  4640   return res;
  4641 }
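// Illustrative only: the loop above makes at most two passes.  Unrolled,
// with a hypothetical take_from_free_list() standing in for the locked
// pop + set_zero_fill_allocated() sequence:
#if 0
HeapRegion* res = NULL;
if (zero_filled) {
  res = take_from_free_list();                        // pass 1: free list
}
if (res == NULL) {
  res = alloc_region_from_unclean_list(zero_filled);  // pass 1: unclean list
}
if (res == NULL) {
  // Pass 2: retry the free list once, in case a region was freed
  // concurrently; a second miss is the 'break' above, and we give up.
  res = take_from_free_list();
}
return res;  // may be NULL
#endif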
  4643 void G1CollectedHeap::remove_allocated_regions_from_lists() {
  4644   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4646     HeapRegion* prev = NULL;
  4647     HeapRegion* cur = _unclean_region_list.hd();
  4648     while (cur != NULL) {
  4649       HeapRegion* next = cur->next_from_unclean_list();
  4650       if (cur->zero_fill_is_allocated()) {
  4651         // Remove from the list.
  4652         if (prev == NULL) {
  4653           (void)_unclean_region_list.pop();
  4654         } else {
  4655           _unclean_region_list.delete_after(prev);
  4656         }
  4657         cur->set_on_unclean_list(false);
  4658         cur->set_next_on_unclean_list(NULL);
  4659       } else {
  4660         prev = cur;
  4661       }
  4662       cur = next;
  4663     }
  4664     assert(_unclean_region_list.sz() == unclean_region_list_length(),
  4665            "Inv");
  4666   }
  4668   {
  4669     HeapRegion* prev = NULL;
  4670     HeapRegion* cur = _free_region_list;
  4671     while (cur != NULL) {
  4672       HeapRegion* next = cur->next_from_free_list();
  4673       if (cur->zero_fill_is_allocated()) {
  4674         // Remove from the list.
  4675         if (prev == NULL) {
  4676           _free_region_list = cur->next_from_free_list();
  4677         } else {
  4678           prev->set_next_on_free_list(cur->next_from_free_list());
  4679         }
  4680         cur->set_on_free_list(false);
  4681         cur->set_next_on_free_list(NULL);
  4682         _free_region_list_size--;
  4683       } else {
  4684         prev = cur;
  4685       }
  4686       cur = next;
  4687     }
  4688     assert(_free_region_list_size == free_region_list_length(), "Inv");
  4689   }
  4690 }
  4692 bool G1CollectedHeap::verify_region_lists() {
  4693   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4694   return verify_region_lists_locked();
  4695 }
  4697 bool G1CollectedHeap::verify_region_lists_locked() {
  4698   HeapRegion* unclean = _unclean_region_list.hd();
  4699   while (unclean != NULL) {
  4700     guarantee(unclean->is_on_unclean_list(), "Well, it is!");
  4701     guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
  4702     guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
  4703               "Everything else is possible.");
  4704     unclean = unclean->next_from_unclean_list();
  4705   }
  4706   guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
  4708   HeapRegion* free_r = _free_region_list;
  4709   while (free_r != NULL) {
  4710     assert(free_r->is_on_free_list(), "Well, it is!");
  4711     assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
  4712     switch (free_r->zero_fill_state()) {
  4713     case HeapRegion::NotZeroFilled:
  4714     case HeapRegion::ZeroFilling:
  4715       guarantee(false, "Should not be on free list.");
  4716       break;
  4717     default:
  4718       // Everything else is possible.
  4719       break;
  4721     free_r = free_r->next_from_free_list();
  4722   }
  4723   guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  4724   // If none of the guarantees above fired, the lists are consistent.
  4725   return true;
  4726 }
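// Illustrative only: the invariants checked above, tabulated by list and
// zero-fill state:
//
//   list     | NotZeroFilled | ZeroFilling | ZeroFilled | Allocated
//   ---------+---------------+-------------+------------+----------
//   unclean  |      ok       |     ok      |     ok     |   never
//   free     |     never     |    never    |     ok     |    ok
//
// (put_free_region_on_list_locked() only ever inserts ZeroFilled regions;
// Allocated appears on the free list transiently, until
// remove_allocated_regions_from_lists() strips it.)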
  4728 size_t G1CollectedHeap::free_region_list_length() {
  4729   assert(ZF_mon->owned_by_self(), "precondition.");
  4730   size_t len = 0;
  4731   HeapRegion* cur = _free_region_list;
  4732   while (cur != NULL) {
  4733     len++;
  4734     cur = cur->next_from_free_list();
  4735   }
  4736   return len;
  4737 }
  4739 size_t G1CollectedHeap::unclean_region_list_length() {
  4740   assert(ZF_mon->owned_by_self(), "precondition.");
  4741   return _unclean_region_list.length();
  4742 }
  4744 size_t G1CollectedHeap::n_regions() {
  4745   return _hrs->length();
  4746 }
  4748 size_t G1CollectedHeap::max_regions() {
  4749   return
  4750     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  4751     HeapRegion::GrainBytes;
  4752 }
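// Worked example (hypothetical numbers): with g1_reserved_obj_bytes() of
// 255.5M and a 1M HeapRegion::GrainBytes, align_size_up rounds the
// reservation to 256M first, so max_regions() returns 256; an already
// aligned 256M reservation gives the same answer.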
  4754 size_t G1CollectedHeap::free_regions() {
  4755   /* Possibly-expensive assert.
  4756   assert(_free_regions == count_free_regions(),
  4757          "_free_regions is off.");
  4758   */
  4759   return _free_regions;
  4760 }
  4762 bool G1CollectedHeap::should_zf() {
  4763   return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
  4764 }
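// Example (hypothetical flag value): with G1ConcZFMaxRegions == 12 the
// ZF thread keeps zero-filling until a dozen pre-zeroed regions sit on
// the free list, at which point should_zf() turns false and it sleeps.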
  4766 class RegionCounter: public HeapRegionClosure {
  4767   size_t _n;
  4768 public:
  4769   RegionCounter() : _n(0) {}
  4770   bool doHeapRegion(HeapRegion* r) {
  4771     if (r->is_empty()) {
  4772       assert(!r->isHumongous(), "H regions should not be empty.");
  4773       _n++;
  4774     }
  4775     return false;
  4776   }
  4777   int res() { return (int) _n; }
  4778 };
  4780 size_t G1CollectedHeap::count_free_regions() {
  4781   RegionCounter rc;
  4782   heap_region_iterate(&rc);
  4783   size_t n = rc.res();
  4784   if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
  4785     n--;
  4786   return n;
  4787 }
  4789 size_t G1CollectedHeap::count_free_regions_list() {
  4790   size_t n = 0;
  4791   size_t o = 0;
  4792   ZF_mon->lock_without_safepoint_check();
  4793   HeapRegion* cur = _free_region_list;
  4794   while (cur != NULL) {
  4795     cur = cur->next_from_free_list();
  4796     n++;
  4797   }
  4798   size_t m = unclean_region_list_length();
  4799   ZF_mon->unlock();
  4800   return n + m;
  4801 }
  4803 bool G1CollectedHeap::should_set_young_locked() {
  4804   assert(heap_lock_held_for_gc(),
  4805               "the heap lock should already be held by or for this thread");
  4806   return  (g1_policy()->in_young_gc_mode() &&
  4807            g1_policy()->should_add_next_region_to_young_list());
  4808 }
  4810 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  4811   assert(heap_lock_held_for_gc(),
  4812               "the heap lock should already be held by or for this thread");
  4813   _young_list->push_region(hr);
  4814   g1_policy()->set_region_short_lived(hr);
  4815 }
  4817 class NoYoungRegionsClosure: public HeapRegionClosure {
  4818 private:
  4819   bool _success;
  4820 public:
  4821   NoYoungRegionsClosure() : _success(true) { }
  4822   bool doHeapRegion(HeapRegion* r) {
  4823     if (r->is_young()) {
  4824       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  4825                              r->bottom(), r->end());
  4826       _success = false;
  4827     }
  4828     return false;
  4829   }
  4830   bool success() { return _success; }
  4831 };
  4833 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
  4834                                              bool check_sample) {
  4835   bool ret = true;
  4837   ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
  4838   if (!ignore_scan_only_list) {
  4839     NoYoungRegionsClosure closure;
  4840     heap_region_iterate(&closure);
  4841     ret = ret && closure.success();
  4842   }
  4844   return ret;
  4845 }
  4847 void G1CollectedHeap::empty_young_list() {
  4848   assert(heap_lock_held_for_gc(),
  4849               "the heap lock should already be held by or for this thread");
  4850   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
  4852   _young_list->empty_list();
  4853 }
  4855 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  4856   bool no_allocs = true;
  4857   for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
  4858     HeapRegion* r = _gc_alloc_regions[ap];
  4859     no_allocs = r == NULL || r->saved_mark_at_top();
  4860   }
  4861   return no_allocs;
  4862 }
  4864 void G1CollectedHeap::retire_all_alloc_regions() {
  4865   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  4866     HeapRegion* r = _gc_alloc_regions[ap];
  4867     if (r != NULL) {
  4868       // Check for aliases.
  4869       bool has_processed_alias = false;
  4870       for (int i = 0; i < ap; ++i) {
  4871         if (_gc_alloc_regions[i] == r) {
  4872           has_processed_alias = true;
  4873           break;
  4874         }
  4875       }
  4876       if (!has_processed_alias) {
  4877         retire_alloc_region(r, false /* par */);
  4878       }
  4879     }
  4880   }
  4881 }
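// Illustrative only: the alias check above exists because several
// GCAllocPurpose slots may share one region.  Hypothetical setup:
#if 0
HeapRegion* r = some_region;                 // hypothetical region
_gc_alloc_regions[GCAllocForSurvived] = r;
_gc_alloc_regions[GCAllocForTenured]  = r;   // alias: same region twice
// retire_all_alloc_regions() must retire r exactly once; the inner loop
// skips any slot whose region already appeared at a lower index.
#endif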
  4884 // Done at the start of full GC.
  4885 void G1CollectedHeap::tear_down_region_lists() {
  4886   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4887   while (pop_unclean_region_list_locked() != NULL) ;
  4888   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
  4889          "Postconditions of loop.");
  4890   while (pop_free_region_list_locked() != NULL) ;
  4891   assert(_free_region_list == NULL, "Postcondition of loop.");
  4892   if (_free_region_list_size != 0) {
  4893     gclog_or_tty->print_cr("Size is %d.", _free_region_list_size);
  4894     print_on(gclog_or_tty, true /* extended */);
  4895   }
  4896   assert(_free_region_list_size == 0, "Postconditions of loop.");
  4897 }
  4900 class RegionResetter: public HeapRegionClosure {
  4901   G1CollectedHeap* _g1;
  4902   int _n;
  4903 public:
  4904   RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  4905   bool doHeapRegion(HeapRegion* r) {
  4906     if (r->continuesHumongous()) return false;
  4907     if (r->top() > r->bottom()) {
  4908       if (r->top() < r->end()) {
  4909         Copy::fill_to_words(r->top(),
  4910                           pointer_delta(r->end(), r->top()));
  4911       }
  4912       r->set_zero_fill_allocated();
  4913     } else {
  4914       assert(r->is_empty(), "tautology");
  4915       _n++;
  4916       switch (r->zero_fill_state()) {
  4917         case HeapRegion::NotZeroFilled:
  4918         case HeapRegion::ZeroFilling:
  4919           _g1->put_region_on_unclean_list_locked(r);
  4920           break;
  4921         case HeapRegion::Allocated:
  4922           r->set_zero_fill_complete();
  4923           // no break; go on to put on free list.
  4924         case HeapRegion::ZeroFilled:
  4925           _g1->put_free_region_on_list_locked(r);
  4926           break;
  4927       }
  4928     }
  4929     return false;
  4930   }
  4932   int getFreeRegionCount() {return _n;}
  4933 };
  4935 // Done at the end of full GC.
  4936 void G1CollectedHeap::rebuild_region_lists() {
  4937   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4938   // This needs to go at the end of the full GC.
  4939   RegionResetter rs;
  4940   heap_region_iterate(&rs);
  4941   _free_regions = rs.getFreeRegionCount();
  4942   // Tell the ZF thread it may have work to do.
  4943   if (should_zf()) ZF_mon->notify_all();
  4944 }
  4946 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  4947   G1CollectedHeap* _g1;
  4948   int _n;
  4949 public:
  4950   UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  4951   bool doHeapRegion(HeapRegion* r) {
  4952     if (r->continuesHumongous()) return false;
  4953     if (r->top() > r->bottom()) {
  4954       // There are assertions in "set_zero_fill_needed()" below that
  4955       // require top() == bottom(), so this is technically illegal.
  4956       // We'll skirt the law here, by making that true temporarily.
  4957       DEBUG_ONLY(HeapWord* save_top = r->top();
  4958                  r->set_top(r->bottom()));
  4959       r->set_zero_fill_needed();
  4960       DEBUG_ONLY(r->set_top(save_top));
  4961     }
  4962     return false;
  4963   }
  4964 };
  4966 // Done at the start of full GC.
  4967 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  4968   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4969   // This needs to go at the start of the full GC.
  4970   UsedRegionsNeedZeroFillSetter rs;
  4971   heap_region_iterate(&rs);
  4972 }
  4974 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  4975   _refine_cte_cl->set_concurrent(concurrent);
  4976 }
  4978 #ifndef PRODUCT
  4980 class PrintHeapRegionClosure: public HeapRegionClosure {
  4981 public:
  4982   bool doHeapRegion(HeapRegion *r) {
  4983     gclog_or_tty->print("Region: "PTR_FORMAT":", r);
  4984     if (r != NULL) {
  4985       if (r->is_on_free_list())
  4986         gclog_or_tty->print("Free ");
  4987       if (r->is_young())
  4988         gclog_or_tty->print("Young ");
  4989       if (r->isHumongous())
  4990         gclog_or_tty->print("Is Humongous ");
  4991       r->print();
  4992     }
  4993     return false;
  4994   }
  4995 };
  4997 class SortHeapRegionClosure : public HeapRegionClosure {
  4998   size_t young_regions, free_regions, unclean_regions;
  4999   size_t hum_regions, count;
  5000   size_t unaccounted, cur_unclean, cur_alloc;
  5001   size_t total_free;
  5002   HeapRegion* cur;
  5003 public:
  5004   SortHeapRegionClosure(HeapRegion *_cur) : cur(_cur), young_regions(0),
  5005     free_regions(0), unclean_regions(0),
  5006     hum_regions(0),
  5007     count(0), unaccounted(0),
  5008     cur_alloc(0), total_free(0)
  5009   {}
  5010   bool doHeapRegion(HeapRegion *r) {
  5011     count++;
  5012     if (r->is_on_free_list()) free_regions++;
  5013     else if (r->is_on_unclean_list()) unclean_regions++;
  5014     else if (r->isHumongous())  hum_regions++;
  5015     else if (r->is_young()) young_regions++;
  5016     else if (r == cur) cur_alloc++;
  5017     else unaccounted++;
  5018     return false;
  5019   }
  5020   void print() {
  5021     total_free = free_regions + unclean_regions;
  5022     gclog_or_tty->print("%d regions\n", count);
  5023     gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
  5024                         total_free, free_regions, unclean_regions);
  5025     gclog_or_tty->print("%d humongous %d young\n",
  5026                         hum_regions, young_regions);
  5027     gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
  5028     gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  5029   }
  5030 };
  5032 void G1CollectedHeap::print_region_counts() {
  5033   SortHeapRegionClosure sc(_cur_alloc_region);
  5034   PrintHeapRegionClosure cl;
  5035   heap_region_iterate(&cl);
  5036   heap_region_iterate(&sc);
  5037   sc.print();
  5038   print_region_accounting_info();
  5039 };
  5041 bool G1CollectedHeap::regions_accounted_for() {
  5042   // TODO: regions accounting for young/survivor/tenured
  5043   return true;
  5044 }
  5046 bool G1CollectedHeap::print_region_accounting_info() {
  5047   gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
  5048                          free_regions(),
  5049                          count_free_regions(), count_free_regions_list(),
  5050                          _free_region_list_size, _unclean_region_list.sz());
  5051   gclog_or_tty->print_cr("cur_alloc: %d.",
  5052                          (_cur_alloc_region == NULL ? 0 : 1));
  5053   gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
  5055   // TODO: check regions accounting for young/survivor/tenured
  5056   return true;
  5057 }
  5059 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5060   HeapRegion* hr = heap_region_containing(p);
  5061   if (hr == NULL) {
  5062     return is_in_permanent(p);
  5063   } else {
  5064     return hr->is_in(p);
  5065   }
  5066 }
  5067 #endif // PRODUCT
  5069 void G1CollectedHeap::g1_unimplemented() {
  5070   // Unimplemented();
  5071 }
