src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

author:      johnc
date:        Mon, 03 Aug 2009 12:59:30 -0700
changeset:   1324:15c5903cf9e1
parent:      1319:83b687ce3090
child:       1325:6cb8e9df7174
permissions: -rw-r--r--

6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
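The scheme the summary describes can be sketched in isolation: a chunk size is fixed up front from the size of the card cache and the number of workers, and each worker repeatedly claims the next unprocessed chunk through an atomic index and cleans the cards in that chunk without further synchronization. The sketch below is illustrative only; the names (HotCardCacheSketch, drain_one_chunk) and the chunks-per-worker divisor are hypothetical stand-ins, not the HotSpot code touched by this changeset.

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Hypothetical stand-in for the hot card cache: a flat array of cards plus
// an atomic claim index that hands out fixed-size chunks to worker threads.
struct HotCardCacheSketch {
  std::vector<int> cards;
  std::atomic<size_t> claim_index{0};
  size_t chunk_size;   // decided once, from cache size and worker count

  HotCardCacheSketch(size_t n_cards, size_t n_workers)
      : cards(n_cards),
        // aim for a few chunks per worker; clamp to at least one card
        chunk_size(std::max<size_t>(1, n_cards / (n_workers * 4))) {}

  // Claim one chunk and process it; returns false once the cache is drained.
  bool drain_one_chunk() {
    size_t start = claim_index.fetch_add(chunk_size);
    if (start >= cards.size()) return false;
    size_t end = std::min(start + chunk_size, cards.size());
    for (size_t i = start; i < end; ++i) {
      cards[i] = 0;   // "clean" the card
    }
    return true;
  }
};

int main() {
  const size_t n_workers = 4;
  HotCardCacheSketch cache(1 << 16, n_workers);
  std::vector<std::thread> workers;
  for (size_t w = 0; w < n_workers; ++w) {
    workers.emplace_back([&cache] { while (cache.drain_one_chunk()) {} });
  }
  for (std::thread& t : workers) t.join();
  return 0;
}

In the changeset itself the workers are the GC worker threads and the chunk size is computed at VM initialization; the sketch only shows the claim-a-chunk pattern.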

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_g1CollectedHeap.cpp.incl"
    28 // turn it on so that the contents of the young list (scan-only /
    29 // to-be-collected) are printed at "strategic" points before / during
    30 // / after the collection --- this is useful for debugging
    31 #define SCAN_ONLY_VERBOSE 0
    32 // CURRENT STATUS
    33 // This file is under construction.  Search for "FIXME".
    35 // INVARIANTS/NOTES
    36 //
    37 // All allocation activity covered by the G1CollectedHeap interface is
    38 //   serialized by acquiring the HeapLock.  This happens in
    39 //   mem_allocate_work, which all such allocation functions call.
    40 //   (Note that this does not apply to TLAB allocation, which is not part
    41 //   of this interface: it is done by clients of this interface.)
    43 // Local to this file.
    45 class RefineCardTableEntryClosure: public CardTableEntryClosure {
    46   SuspendibleThreadSet* _sts;
    47   G1RemSet* _g1rs;
    48   ConcurrentG1Refine* _cg1r;
    49   bool _concurrent;
    50 public:
    51   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
    52                               G1RemSet* g1rs,
    53                               ConcurrentG1Refine* cg1r) :
    54     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    55   {}
    56   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    57     _g1rs->concurrentRefineOneCard(card_ptr, worker_i);
    58     if (_concurrent && _sts->should_yield()) {
    59       // Caller will actually yield.
    60       return false;
    61     }
    62     // Otherwise, we finished successfully; return true.
    63     return true;
    64   }
    65   void set_concurrent(bool b) { _concurrent = b; }
    66 };
    69 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
    70   int _calls;
    71   G1CollectedHeap* _g1h;
    72   CardTableModRefBS* _ctbs;
    73   int _histo[256];
    74 public:
    75   ClearLoggedCardTableEntryClosure() :
    76     _calls(0)
    77   {
    78     _g1h = G1CollectedHeap::heap();
    79     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
    80     for (int i = 0; i < 256; i++) _histo[i] = 0;
    81   }
    82   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    83     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
    84       _calls++;
    85       unsigned char* ujb = (unsigned char*)card_ptr;
    86       int ind = (int)(*ujb);
    87       _histo[ind]++;
    88       *card_ptr = -1;
    89     }
    90     return true;
    91   }
    92   int calls() { return _calls; }
    93   void print_histo() {
    94     gclog_or_tty->print_cr("Card table value histogram:");
    95     for (int i = 0; i < 256; i++) {
    96       if (_histo[i] != 0) {
    97         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
    98       }
    99     }
   100   }
   101 };
   103 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
   104   int _calls;
   105   G1CollectedHeap* _g1h;
   106   CardTableModRefBS* _ctbs;
   107 public:
   108   RedirtyLoggedCardTableEntryClosure() :
   109     _calls(0)
   110   {
   111     _g1h = G1CollectedHeap::heap();
   112     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
   113   }
   114   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   115     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
   116       _calls++;
   117       *card_ptr = 0;
   118     }
   119     return true;
   120   }
   121   int calls() { return _calls; }
   122 };
   124 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
   125 public:
   126   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
   127     *card_ptr = CardTableModRefBS::dirty_card_val();
   128     return true;
   129   }
   130 };
   132 YoungList::YoungList(G1CollectedHeap* g1h)
   133   : _g1h(g1h), _head(NULL),
   134     _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
   135     _length(0), _scan_only_length(0),
   136     _last_sampled_rs_lengths(0),
   137     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
   138 {
   139   guarantee( check_list_empty(false), "just making sure..." );
   140 }
   142 void YoungList::push_region(HeapRegion *hr) {
   143   assert(!hr->is_young(), "should not already be young");
   144   assert(hr->get_next_young_region() == NULL, "cause it should!");
   146   hr->set_next_young_region(_head);
   147   _head = hr;
   149   hr->set_young();
   150   double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
   151   ++_length;
   152 }
   154 void YoungList::add_survivor_region(HeapRegion* hr) {
   155   assert(hr->is_survivor(), "should be flagged as survivor region");
   156   assert(hr->get_next_young_region() == NULL, "cause it should!");
   158   hr->set_next_young_region(_survivor_head);
   159   if (_survivor_head == NULL) {
   160     _survivor_tail = hr;
   161   }
   162   _survivor_head = hr;
   164   ++_survivor_length;
   165 }
   167 HeapRegion* YoungList::pop_region() {
   168   while (_head != NULL) {
   169     assert( length() > 0, "list should not be empty" );
   170     HeapRegion* ret = _head;
   171     _head = ret->get_next_young_region();
   172     ret->set_next_young_region(NULL);
   173     --_length;
   174     assert(ret->is_young(), "region should be very young");
   176     // Replace 'Survivor' region type with 'Young'. So the region will
   177     // be treated as a young region and will not be 'confused' with
   178     // newly created survivor regions.
   179     if (ret->is_survivor()) {
   180       ret->set_young();
   181     }
   183     if (!ret->is_scan_only()) {
   184       return ret;
   185     }
   187     // scan-only, we'll add it to the scan-only list
   188     if (_scan_only_tail == NULL) {
   189       guarantee( _scan_only_head == NULL, "invariant" );
   191       _scan_only_head = ret;
   192       _curr_scan_only = ret;
   193     } else {
   194       guarantee( _scan_only_head != NULL, "invariant" );
   195       _scan_only_tail->set_next_young_region(ret);
   196     }
   197     guarantee( ret->get_next_young_region() == NULL, "invariant" );
   198     _scan_only_tail = ret;
   200     // no need to be tagged as scan-only any more
   201     ret->set_young();
   203     ++_scan_only_length;
   204   }
   205   assert( length() == 0, "list should be empty" );
   206   return NULL;
   207 }
   209 void YoungList::empty_list(HeapRegion* list) {
   210   while (list != NULL) {
   211     HeapRegion* next = list->get_next_young_region();
   212     list->set_next_young_region(NULL);
   213     list->uninstall_surv_rate_group();
   214     list->set_not_young();
   215     list = next;
   216   }
   217 }
   219 void YoungList::empty_list() {
   220   assert(check_list_well_formed(), "young list should be well formed");
   222   empty_list(_head);
   223   _head = NULL;
   224   _length = 0;
   226   empty_list(_scan_only_head);
   227   _scan_only_head = NULL;
   228   _scan_only_tail = NULL;
   229   _scan_only_length = 0;
   230   _curr_scan_only = NULL;
   232   empty_list(_survivor_head);
   233   _survivor_head = NULL;
   234   _survivor_tail = NULL;
   235   _survivor_length = 0;
   237   _last_sampled_rs_lengths = 0;
   239   assert(check_list_empty(false), "just making sure...");
   240 }
   242 bool YoungList::check_list_well_formed() {
   243   bool ret = true;
   245   size_t length = 0;
   246   HeapRegion* curr = _head;
   247   HeapRegion* last = NULL;
   248   while (curr != NULL) {
   249     if (!curr->is_young() || curr->is_scan_only()) {
   250       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
   251                              "incorrectly tagged (%d, %d)",
   252                              curr->bottom(), curr->end(),
   253                              curr->is_young(), curr->is_scan_only());
   254       ret = false;
   255     }
   256     ++length;
   257     last = curr;
   258     curr = curr->get_next_young_region();
   259   }
   260   ret = ret && (length == _length);
   262   if (!ret) {
   263     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
   264     gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
   265                            length, _length);
   266   }
   268   bool scan_only_ret = true;
   269   length = 0;
   270   curr = _scan_only_head;
   271   last = NULL;
   272   while (curr != NULL) {
   273     if (!curr->is_young() || curr->is_scan_only()) {
   274       gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
   275                              "incorrectly tagged (%d, %d)",
   276                              curr->bottom(), curr->end(),
   277                              curr->is_young(), curr->is_scan_only());
   278       scan_only_ret = false;
   279     }
   280     ++length;
   281     last = curr;
   282     curr = curr->get_next_young_region();
   283   }
   284   scan_only_ret = scan_only_ret && (length == _scan_only_length);
   286   if ( (last != _scan_only_tail) ||
   287        (_scan_only_head == NULL && _scan_only_tail != NULL) ||
   288        (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
   289      gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
   290      scan_only_ret = false;
   291   }
   293   if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
   294     gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
   295     scan_only_ret = false;
   296    }
   298   if (!scan_only_ret) {
   299     gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
   300     gclog_or_tty->print_cr("###   list has %d entries, _scan_only_length is %d",
   301                   length, _scan_only_length);
   302   }
   304   return ret && scan_only_ret;
   305 }
   307 bool YoungList::check_list_empty(bool ignore_scan_only_list,
   308                                  bool check_sample) {
   309   bool ret = true;
   311   if (_length != 0) {
   312     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
   313                   _length);
   314     ret = false;
   315   }
   316   if (check_sample && _last_sampled_rs_lengths != 0) {
   317     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
   318     ret = false;
   319   }
   320   if (_head != NULL) {
   321     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
   322     ret = false;
   323   }
   324   if (!ret) {
   325     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   326   }
   328   if (ignore_scan_only_list)
   329     return ret;
   331   bool scan_only_ret = true;
   332   if (_scan_only_length != 0) {
   333     gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
   334                   _scan_only_length);
   335     scan_only_ret = false;
   336   }
   337   if (_scan_only_head != NULL) {
   338     gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
   339      scan_only_ret = false;
   340   }
   341   if (_scan_only_tail != NULL) {
   342     gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
   343     scan_only_ret = false;
   344   }
   345   if (!scan_only_ret) {
   346     gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
   347   }
   349   return ret && scan_only_ret;
   350 }
   352 void
   353 YoungList::rs_length_sampling_init() {
   354   _sampled_rs_lengths = 0;
   355   _curr               = _head;
   356 }
   358 bool
   359 YoungList::rs_length_sampling_more() {
   360   return _curr != NULL;
   361 }
   363 void
   364 YoungList::rs_length_sampling_next() {
   365   assert( _curr != NULL, "invariant" );
   366   _sampled_rs_lengths += _curr->rem_set()->occupied();
   367   _curr = _curr->get_next_young_region();
   368   if (_curr == NULL) {
   369     _last_sampled_rs_lengths = _sampled_rs_lengths;
   370     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
   371   }
   372 }
   374 void
   375 YoungList::reset_auxilary_lists() {
   376   // We could have just "moved" the scan-only list to the young list.
   377   // However, the scan-only list is ordered according to the region
   378   // age in descending order, so, by moving one entry at a time, we
   379   // ensure that it is recreated in ascending order.
   381   guarantee( is_empty(), "young list should be empty" );
   382   assert(check_list_well_formed(), "young list should be well formed");
   384   // Add survivor regions to SurvRateGroup.
   385   _g1h->g1_policy()->note_start_adding_survivor_regions();
   386   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   387   for (HeapRegion* curr = _survivor_head;
   388        curr != NULL;
   389        curr = curr->get_next_young_region()) {
   390     _g1h->g1_policy()->set_region_survivors(curr);
   391   }
   392   _g1h->g1_policy()->note_stop_adding_survivor_regions();
   394   if (_survivor_head != NULL) {
   395     _head           = _survivor_head;
   396     _length         = _survivor_length + _scan_only_length;
   397     _survivor_tail->set_next_young_region(_scan_only_head);
   398   } else {
   399     _head           = _scan_only_head;
   400     _length         = _scan_only_length;
   401   }
   403   for (HeapRegion* curr = _scan_only_head;
   404        curr != NULL;
   405        curr = curr->get_next_young_region()) {
   406     curr->recalculate_age_in_surv_rate_group();
   407   }
   408   _scan_only_head   = NULL;
   409   _scan_only_tail   = NULL;
   410   _scan_only_length = 0;
   411   _curr_scan_only   = NULL;
   413   _survivor_head    = NULL;
   414   _survivor_tail   = NULL;
   415   _survivor_length  = 0;
   416   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
   418   assert(check_list_well_formed(), "young list should be well formed");
   419 }
   421 void YoungList::print() {
   422   HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
   423   const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
   425   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
   426     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
   427     HeapRegion *curr = lists[list];
   428     if (curr == NULL)
   429       gclog_or_tty->print_cr("  empty");
   430     while (curr != NULL) {
   431       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
   432                              "age: %4d, y: %d, s-o: %d, surv: %d",
   433                              curr->bottom(), curr->end(),
   434                              curr->top(),
   435                              curr->prev_top_at_mark_start(),
   436                              curr->next_top_at_mark_start(),
   437                              curr->top_at_conc_mark_count(),
   438                              curr->age_in_surv_rate_group_cond(),
   439                              curr->is_young(),
   440                              curr->is_scan_only(),
   441                              curr->is_survivor());
   442       curr = curr->get_next_young_region();
   443     }
   444   }
   446   gclog_or_tty->print_cr("");
   447 }
   449 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
   450 {
   451   // Claim the right to put the region on the dirty cards region list
   452   // by installing a self pointer.
   453   HeapRegion* next = hr->get_next_dirty_cards_region();
   454   if (next == NULL) {
   455     HeapRegion* res = (HeapRegion*)
   456       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
   457                           NULL);
   458     if (res == NULL) {
   459       HeapRegion* head;
   460       do {
   461         // Put the region to the dirty cards region list.
   462         head = _dirty_cards_region_list;
   463         next = (HeapRegion*)
   464           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
   465         if (next == head) {
   466           assert(hr->get_next_dirty_cards_region() == hr,
   467                  "hr->get_next_dirty_cards_region() != hr");
   468           if (next == NULL) {
   469             // The last region in the list points to itself.
   470             hr->set_next_dirty_cards_region(hr);
   471           } else {
   472             hr->set_next_dirty_cards_region(next);
   473           }
   474         }
   475       } while (next != head);
   476     }
   477   }
   478 }
   480 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
   481 {
   482   HeapRegion* head;
   483   HeapRegion* hr;
   484   do {
   485     head = _dirty_cards_region_list;
   486     if (head == NULL) {
   487       return NULL;
   488     }
   489     HeapRegion* new_head = head->get_next_dirty_cards_region();
   490     if (head == new_head) {
   491       // The last region.
   492       new_head = NULL;
   493     }
   494     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
   495                                           head);
   496   } while (hr != head);
   497   assert(hr != NULL, "invariant");
   498   hr->set_next_dirty_cards_region(NULL);
   499   return hr;
   500 }
   502 void G1CollectedHeap::stop_conc_gc_threads() {
   503   _cg1r->stop();
   504   _czft->stop();
   505   _cmThread->stop();
   506 }
   509 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   510   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   511   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
   513   // Count the dirty cards at the start.
   514   CountNonCleanMemRegionClosure count1(this);
   515   ct_bs->mod_card_iterate(&count1);
   516   int orig_count = count1.n();
   518   // First clear the logged cards.
   519   ClearLoggedCardTableEntryClosure clear;
   520   dcqs.set_closure(&clear);
   521   dcqs.apply_closure_to_all_completed_buffers();
   522   dcqs.iterate_closure_all_threads(false);
   523   clear.print_histo();
   525   // Now ensure that there's no dirty cards.
   526   CountNonCleanMemRegionClosure count2(this);
   527   ct_bs->mod_card_iterate(&count2);
   528   if (count2.n() != 0) {
   529     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
   530                            count2.n(), orig_count);
   531   }
   532   guarantee(count2.n() == 0, "Card table should be clean.");
   534   RedirtyLoggedCardTableEntryClosure redirty;
   535   JavaThread::dirty_card_queue_set().set_closure(&redirty);
   536   dcqs.apply_closure_to_all_completed_buffers();
   537   dcqs.iterate_closure_all_threads(false);
   538   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
   539                          clear.calls(), orig_count);
   540   guarantee(redirty.calls() == clear.calls(),
   541             "Or else mechanism is broken.");
   543   CountNonCleanMemRegionClosure count3(this);
   544   ct_bs->mod_card_iterate(&count3);
   545   if (count3.n() != orig_count) {
   546     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
   547                            orig_count, count3.n());
   548     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   549   }
   551   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
   552 }
   554 // Private class members.
   556 G1CollectedHeap* G1CollectedHeap::_g1h;
   558 // Private methods.
   560 // Finds a HeapRegion that can be used to allocate a given size of block.
   563 HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
   564                                                  bool do_expand,
   565                                                  bool zero_filled) {
   566   ConcurrentZFThread::note_region_alloc();
   567   HeapRegion* res = alloc_free_region_from_lists(zero_filled);
   568   if (res == NULL && do_expand) {
   569     expand(word_size * HeapWordSize);
   570     res = alloc_free_region_from_lists(zero_filled);
   571     assert(res == NULL ||
   572            (!res->isHumongous() &&
   573             (!zero_filled ||
   574              res->zero_fill_state() == HeapRegion::Allocated)),
   575            "Alloc Regions must be zero filled (and non-H)");
   576   }
   577   if (res != NULL && res->is_empty()) _free_regions--;
   578   assert(res == NULL ||
   579          (!res->isHumongous() &&
   580           (!zero_filled ||
   581            res->zero_fill_state() == HeapRegion::Allocated)),
   582          "Non-young alloc Regions must be zero filled (and non-H)");
   584   if (G1PrintRegions) {
   585     if (res != NULL) {
   586       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
   587                              "top "PTR_FORMAT,
   588                              res->hrs_index(), res->bottom(), res->end(), res->top());
   589     }
   590   }
   592   return res;
   593 }
   595 HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
   596                                                          size_t word_size,
   597                                                          bool zero_filled) {
   598   HeapRegion* alloc_region = NULL;
   599   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
   600     alloc_region = newAllocRegion_work(word_size, true, zero_filled);
   601     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
   602       alloc_region->set_survivor();
   603     }
   604     ++_gc_alloc_region_counts[purpose];
   605   } else {
   606     g1_policy()->note_alloc_region_limit_reached(purpose);
   607   }
   608   return alloc_region;
   609 }
   611 // If could fit into free regions w/o expansion, try.
   612 // Otherwise, if can expand, do so.
   613 // Otherwise, if using ex regions might help, try with ex given back.
   614 HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
   615   assert(regions_accounted_for(), "Region leakage!");
   617   // We can't allocate H regions while cleanupComplete is running, since
   618   // some of the regions we find to be empty might not yet be added to the
   619   // unclean list.  (If we're already at a safepoint, this call is
   620   // unnecessary, not to mention wrong.)
   621   if (!SafepointSynchronize::is_at_safepoint())
   622     wait_for_cleanup_complete();
   624   size_t num_regions =
   625     round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   627   // Special case if < one region???
   629   // Remember the ft size.
   630   size_t x_size = expansion_regions();
   632   HeapWord* res = NULL;
   633   bool eliminated_allocated_from_lists = false;
   635   // Can the allocation potentially fit in the free regions?
   636   if (free_regions() >= num_regions) {
   637     res = _hrs->obj_allocate(word_size);
   638   }
   639   if (res == NULL) {
   640     // Try expansion.
   641     size_t fs = _hrs->free_suffix();
   642     if (fs + x_size >= num_regions) {
   643       expand((num_regions - fs) * HeapRegion::GrainBytes);
   644       res = _hrs->obj_allocate(word_size);
   645       assert(res != NULL, "This should have worked.");
   646     } else {
   647       // Expansion won't help.  Are there enough free regions if we get rid
   648       // of reservations?
   649       size_t avail = free_regions();
   650       if (avail >= num_regions) {
   651         res = _hrs->obj_allocate(word_size);
   652         if (res != NULL) {
   653           remove_allocated_regions_from_lists();
   654           eliminated_allocated_from_lists = true;
   655         }
   656       }
   657     }
   658   }
   659   if (res != NULL) {
   660     // Increment by the number of regions allocated.
   661     // FIXME: Assumes regions all of size GrainBytes.
   662 #ifndef PRODUCT
   663     mr_bs()->verify_clean_region(MemRegion(res, res + num_regions *
   664                                            HeapRegion::GrainWords));
   665 #endif
   666     if (!eliminated_allocated_from_lists)
   667       remove_allocated_regions_from_lists();
   668     _summary_bytes_used += word_size * HeapWordSize;
   669     _free_regions -= num_regions;
   670     _num_humongous_regions += (int) num_regions;
   671   }
   672   assert(regions_accounted_for(), "Region Leakage");
   673   return res;
   674 }
   676 HeapWord*
   677 G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   678                                          bool permit_collection_pause) {
   679   HeapWord* res = NULL;
   680   HeapRegion* allocated_young_region = NULL;
   682   assert( SafepointSynchronize::is_at_safepoint() ||
   683           Heap_lock->owned_by_self(), "pre condition of the call" );
   685   if (isHumongous(word_size)) {
   686     // Allocation of a humongous object can, in a sense, complete a
   687     // partial region, if the previous alloc was also humongous, and
   688     // caused the test below to succeed.
   689     if (permit_collection_pause)
   690       do_collection_pause_if_appropriate(word_size);
   691     res = humongousObjAllocate(word_size);
   692     assert(_cur_alloc_region == NULL
   693            || !_cur_alloc_region->isHumongous(),
   694            "Prevent a regression of this bug.");
   696   } else {
   697     // We may have concurrent cleanup working at the time. Wait for it
   698     // to complete. In the future we would probably want to make the
   699     // concurrent cleanup truly concurrent by decoupling it from the
   700     // allocation.
   701     if (!SafepointSynchronize::is_at_safepoint())
   702       wait_for_cleanup_complete();
   703     // If we do a collection pause, this will be reset to a non-NULL
   704     // value.  If we don't, nulling here ensures that we allocate a new
   705     // region below.
   706     if (_cur_alloc_region != NULL) {
   707       // We're finished with the _cur_alloc_region.
   708       _summary_bytes_used += _cur_alloc_region->used();
   709       _cur_alloc_region = NULL;
   710     }
   711     assert(_cur_alloc_region == NULL, "Invariant.");
   712     // Completion of a heap region is perhaps a good point at which to do
   713     // a collection pause.
   714     if (permit_collection_pause)
   715       do_collection_pause_if_appropriate(word_size);
   716     // Make sure we have an allocation region available.
   717     if (_cur_alloc_region == NULL) {
   718       if (!SafepointSynchronize::is_at_safepoint())
   719         wait_for_cleanup_complete();
   720       bool next_is_young = should_set_young_locked();
   721       // If the next region is not young, make sure it's zero-filled.
   722       _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
   723       if (_cur_alloc_region != NULL) {
   724         _summary_bytes_used -= _cur_alloc_region->used();
   725         if (next_is_young) {
   726           set_region_short_lived_locked(_cur_alloc_region);
   727           allocated_young_region = _cur_alloc_region;
   728         }
   729       }
   730     }
   731     assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
   732            "Prevent a regression of this bug.");
   734     // Now retry the allocation.
   735     if (_cur_alloc_region != NULL) {
   736       res = _cur_alloc_region->allocate(word_size);
   737     }
   738   }
   740   // NOTE: fails frequently in PRT
   741   assert(regions_accounted_for(), "Region leakage!");
   743   if (res != NULL) {
   744     if (!SafepointSynchronize::is_at_safepoint()) {
   745       assert( permit_collection_pause, "invariant" );
   746       assert( Heap_lock->owned_by_self(), "invariant" );
   747       Heap_lock->unlock();
   748     }
   750     if (allocated_young_region != NULL) {
   751       HeapRegion* hr = allocated_young_region;
   752       HeapWord* bottom = hr->bottom();
   753       HeapWord* end = hr->end();
   754       MemRegion mr(bottom, end);
   755       ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
   756     }
   757   }
   759   assert( SafepointSynchronize::is_at_safepoint() ||
   760           (res == NULL && Heap_lock->owned_by_self()) ||
   761           (res != NULL && !Heap_lock->owned_by_self()),
   762           "post condition of the call" );
   764   return res;
   765 }
   767 HeapWord*
   768 G1CollectedHeap::mem_allocate(size_t word_size,
   769                               bool   is_noref,
   770                               bool   is_tlab,
   771                               bool* gc_overhead_limit_was_exceeded) {
   772   debug_only(check_for_valid_allocation_state());
   773   assert(no_gc_in_progress(), "Allocation during gc not allowed");
   774   HeapWord* result = NULL;
   776   // Loop until the allocation is satisfied,
   777   // or unsatisfied after GC.
   778   for (int try_count = 1; /* return or throw */; try_count += 1) {
   779     int gc_count_before;
   780     {
   781       Heap_lock->lock();
   782       result = attempt_allocation(word_size);
   783       if (result != NULL) {
   784         // attempt_allocation should have unlocked the heap lock
   785         assert(is_in(result), "result not in heap");
   786         return result;
   787       }
   788       // Read the gc count while the heap lock is held.
   789       gc_count_before = SharedHeap::heap()->total_collections();
   790       Heap_lock->unlock();
   791     }
   793     // Create the garbage collection operation...
   794     VM_G1CollectForAllocation op(word_size,
   795                                  gc_count_before);
   797     // ...and get the VM thread to execute it.
   798     VMThread::execute(&op);
   799     if (op.prologue_succeeded()) {
   800       result = op.result();
   801       assert(result == NULL || is_in(result), "result not in heap");
   802       return result;
   803     }
   805     // Give a warning if we seem to be looping forever.
   806     if ((QueuedAllocationWarningCount > 0) &&
   807         (try_count % QueuedAllocationWarningCount == 0)) {
   808       warning("G1CollectedHeap::mem_allocate_work retries %d times",
   809               try_count);
   810     }
   811   }
   812 }
   814 void G1CollectedHeap::abandon_cur_alloc_region() {
   815   if (_cur_alloc_region != NULL) {
   816     // We're finished with the _cur_alloc_region.
   817     if (_cur_alloc_region->is_empty()) {
   818       _free_regions++;
   819       free_region(_cur_alloc_region);
   820     } else {
   821       _summary_bytes_used += _cur_alloc_region->used();
   822     }
   823     _cur_alloc_region = NULL;
   824   }
   825 }
   827 void G1CollectedHeap::abandon_gc_alloc_regions() {
   828   // first, make sure that the GC alloc region list is empty (it should!)
   829   assert(_gc_alloc_region_list == NULL, "invariant");
   830   release_gc_alloc_regions(true /* totally */);
   831 }
   833 class PostMCRemSetClearClosure: public HeapRegionClosure {
   834   ModRefBarrierSet* _mr_bs;
   835 public:
   836   PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   837   bool doHeapRegion(HeapRegion* r) {
   838     r->reset_gc_time_stamp();
   839     if (r->continuesHumongous())
   840       return false;
   841     HeapRegionRemSet* hrrs = r->rem_set();
   842     if (hrrs != NULL) hrrs->clear();
   843     // You might think here that we could clear just the cards
   844     // corresponding to the used region.  But no: if we leave a dirty card
   845     // in a region we might allocate into, then it would prevent that card
   846     // from being enqueued, and cause it to be missed.
   847     // Re: the performance cost: we shouldn't be doing full GC anyway!
   848     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
   849     return false;
   850   }
   851 };
   854 class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
   855   ModRefBarrierSet* _mr_bs;
   856 public:
   857   PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   858   bool doHeapRegion(HeapRegion* r) {
   859     if (r->continuesHumongous()) return false;
   860     if (r->used_region().word_size() != 0) {
   861       _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
   862     }
   863     return false;
   864   }
   865 };
   867 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   868   G1CollectedHeap*   _g1h;
   869   UpdateRSOopClosure _cl;
   870   int                _worker_i;
   871 public:
   872   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
   873     _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
   874     _worker_i(worker_i),
   875     _g1h(g1)
   876   { }
   877   bool doHeapRegion(HeapRegion* r) {
   878     if (!r->continuesHumongous()) {
   879       _cl.set_from(r);
   880       r->oop_iterate(&_cl);
   881     }
   882     return false;
   883   }
   884 };
   886 class ParRebuildRSTask: public AbstractGangTask {
   887   G1CollectedHeap* _g1;
   888 public:
   889   ParRebuildRSTask(G1CollectedHeap* g1)
   890     : AbstractGangTask("ParRebuildRSTask"),
   891       _g1(g1)
   892   { }
   894   void work(int i) {
   895     RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
   896     _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
   897                                          HeapRegion::RebuildRSClaimValue);
   898   }
   899 };
   901 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
   902                                     size_t word_size) {
   903   ResourceMark rm;
   905   if (PrintHeapAtGC) {
   906     Universe::print_heap_before_gc();
   907   }
   909   if (full && DisableExplicitGC) {
   910     gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
   911     return;
   912   }
   914   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   915   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
   917   if (GC_locker::is_active()) {
   918     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   919   }
   921   {
   922     IsGCActiveMark x;
   924     // Timing
   925     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   926     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   927     TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
   929     double start = os::elapsedTime();
   930     GCOverheadReporter::recordSTWStart(start);
   931     g1_policy()->record_full_collection_start();
   933     gc_prologue(true);
   934     increment_total_collections(true /* full gc */);
   936     size_t g1h_prev_used = used();
   937     assert(used() == recalculate_used(), "Should be equal");
   939     if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
   940       HandleMark hm;  // Discard invalid handles created during verification
   941       prepare_for_verify();
   942       gclog_or_tty->print(" VerifyBeforeGC:");
   943       Universe::verify(true);
   944     }
   945     assert(regions_accounted_for(), "Region leakage!");
   947     COMPILER2_PRESENT(DerivedPointerTable::clear());
   949     // We want to discover references, but not process them yet.
   950     // This mode is disabled in
   951     // instanceRefKlass::process_discovered_references if the
   952     // generation does some collection work, or
   953     // instanceRefKlass::enqueue_discovered_references if the
   954     // generation returns without doing any work.
   955     ref_processor()->disable_discovery();
   956     ref_processor()->abandon_partial_discovery();
   957     ref_processor()->verify_no_references_recorded();
   959     // Abandon current iterations of concurrent marking and concurrent
   960     // refinement, if any are in progress.
   961     concurrent_mark()->abort();
   963     // Make sure we'll choose a new allocation region afterwards.
   964     abandon_cur_alloc_region();
   965     abandon_gc_alloc_regions();
   966     assert(_cur_alloc_region == NULL, "Invariant.");
   967     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
   968     tear_down_region_lists();
   969     set_used_regions_to_need_zero_fill();
   970     if (g1_policy()->in_young_gc_mode()) {
   971       empty_young_list();
   972       g1_policy()->set_full_young_gcs(true);
   973     }
   975     // Temporarily make reference _discovery_ single threaded (non-MT).
   976     ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
   978     // Temporarily make refs discovery atomic
   979     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
   981     // Temporarily clear _is_alive_non_header
   982     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
   984     ref_processor()->enable_discovery();
   985     ref_processor()->setup_policy(clear_all_soft_refs);
   987     // Do collection work
   988     {
   989       HandleMark hm;  // Discard invalid handles created during gc
   990       G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
   991     }
   992     // Because freeing humongous regions may have added some unclean
   993     // regions, it is necessary to tear down again before rebuilding.
   994     tear_down_region_lists();
   995     rebuild_region_lists();
   997     _summary_bytes_used = recalculate_used();
   999     ref_processor()->enqueue_discovered_references();
  1001     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  1003     if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  1004       HandleMark hm;  // Discard invalid handles created during verification
  1005       gclog_or_tty->print(" VerifyAfterGC:");
  1006       prepare_for_verify();
  1007       Universe::verify(false);
  1008     }
  1009     NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  1011     reset_gc_time_stamp();
  1012     // Since everything potentially moved, we will clear all remembered
  1013     // sets, and clear all cards.  Later we will rebuild remembered
  1014     // sets. We will also reset the GC time stamps of the regions.
  1015     PostMCRemSetClearClosure rs_clear(mr_bs());
  1016     heap_region_iterate(&rs_clear);
  1018     // Resize the heap if necessary.
  1019     resize_if_necessary_after_full_collection(full ? 0 : word_size);
  1021     if (_cg1r->use_cache()) {
  1022       _cg1r->clear_and_record_card_counts();
  1023       _cg1r->clear_hot_cache();
  1024     }
  1026     // Rebuild remembered sets of all regions.
  1027     if (ParallelGCThreads > 0) {
  1028       ParRebuildRSTask rebuild_rs_task(this);
  1029       assert(check_heap_region_claim_values(
  1030              HeapRegion::InitialClaimValue), "sanity check");
  1031       set_par_threads(workers()->total_workers());
  1032       workers()->run_task(&rebuild_rs_task);
  1033       set_par_threads(0);
  1034       assert(check_heap_region_claim_values(
  1035              HeapRegion::RebuildRSClaimValue), "sanity check");
  1036       reset_heap_region_claim_values();
  1037     } else {
  1038       RebuildRSOutOfRegionClosure rebuild_rs(this);
  1039       heap_region_iterate(&rebuild_rs);
  1040     }
  1042     if (PrintGC) {
  1043       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  1044     }
  1046     if (true) { // FIXME
  1047       // Ask the permanent generation to adjust size for full collections
  1048       perm()->compute_new_size();
  1049     }
  1051     double end = os::elapsedTime();
  1052     GCOverheadReporter::recordSTWEnd(end);
  1053     g1_policy()->record_full_collection_end();
  1055 #ifdef TRACESPINNING
  1056     ParallelTaskTerminator::print_termination_counts();
  1057 #endif
  1059     gc_epilogue(true);
  1061     // Discard all rset updates
  1062     JavaThread::dirty_card_queue_set().abandon_logs();
  1063     assert(!G1DeferredRSUpdate
  1064            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  1065     assert(regions_accounted_for(), "Region leakage!");
  1066   }
  1068   if (g1_policy()->in_young_gc_mode()) {
  1069     _young_list->reset_sampled_info();
  1070     assert( check_young_list_empty(false, false),
  1071             "young list should be empty at this point");
  1072   }
  1074   if (PrintHeapAtGC) {
  1075     Universe::print_heap_after_gc();
  1076   }
  1077 }
  1079 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  1080   do_collection(true, clear_all_soft_refs, 0);
  1081 }
  1083 // This code is mostly copied from TenuredGeneration.
  1084 void
  1085 G1CollectedHeap::
  1086 resize_if_necessary_after_full_collection(size_t word_size) {
  1087   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
  1089   // Include the current allocation, if any, and bytes that will be
  1090   // pre-allocated to support collections, as "used".
  1091   const size_t used_after_gc = used();
  1092   const size_t capacity_after_gc = capacity();
  1093   const size_t free_after_gc = capacity_after_gc - used_after_gc;
  1095   // We don't have floating point command-line arguments
  1096   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100;
  1097   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  1098   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
  1099   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  1101   size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  1102   size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
  1104   // Don't shrink less than the initial size.
  1105   minimum_desired_capacity =
  1106     MAX2(minimum_desired_capacity,
  1107          collector_policy()->initial_heap_byte_size());
  1108   maximum_desired_capacity =
  1109     MAX2(maximum_desired_capacity,
  1110          collector_policy()->initial_heap_byte_size());
  1112   // We are failing here because minimum_desired_capacity is
  1113   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  1114   assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check");
  1116   if (PrintGC && Verbose) {
  1117     const double free_percentage = ((double)free_after_gc) / capacity();
  1118     gclog_or_tty->print_cr("Computing new size after full GC ");
  1119     gclog_or_tty->print_cr("  "
  1120                            "  minimum_free_percentage: %6.2f",
  1121                            minimum_free_percentage);
  1122     gclog_or_tty->print_cr("  "
  1123                            "  maximum_free_percentage: %6.2f",
  1124                            maximum_free_percentage);
  1125     gclog_or_tty->print_cr("  "
  1126                            "  capacity: %6.1fK"
  1127                            "  minimum_desired_capacity: %6.1fK"
  1128                            "  maximum_desired_capacity: %6.1fK",
  1129                            capacity() / (double) K,
  1130                            minimum_desired_capacity / (double) K,
  1131                            maximum_desired_capacity / (double) K);
  1132     gclog_or_tty->print_cr("  "
  1133                            "   free_after_gc   : %6.1fK"
  1134                            "   used_after_gc   : %6.1fK",
  1135                            free_after_gc / (double) K,
  1136                            used_after_gc / (double) K);
  1137     gclog_or_tty->print_cr("  "
  1138                            "   free_percentage: %6.2f",
  1139                            free_percentage);
  1140   }
  1141   if (capacity() < minimum_desired_capacity) {
  1142     // Don't expand unless it's significant
  1143     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  1144     expand(expand_bytes);
  1145     if (PrintGC && Verbose) {
  1146       gclog_or_tty->print_cr("    expanding:"
  1147                              "  minimum_desired_capacity: %6.1fK"
  1148                              "  expand_bytes: %6.1fK",
  1149                              minimum_desired_capacity / (double) K,
  1150                              expand_bytes / (double) K);
  1151     }
  1153     // No expansion, now see if we want to shrink
  1154   } else if (capacity() > maximum_desired_capacity) {
  1155     // Capacity too large, compute shrinking size
  1156     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
  1157     shrink(shrink_bytes);
  1158     if (PrintGC && Verbose) {
  1159       gclog_or_tty->print_cr("  "
  1160                              "  shrinking:"
  1161                              "  initSize: %.1fK"
  1162                              "  maximum_desired_capacity: %.1fK",
  1163                              collector_policy()->initial_heap_byte_size() / (double) K,
  1164                              maximum_desired_capacity / (double) K);
  1165       gclog_or_tty->print_cr("  "
  1166                              "  shrink_bytes: %.1fK",
  1167                              shrink_bytes / (double) K);
  1168     }
  1169   }
  1170 }
  1173 HeapWord*
  1174 G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  1175   HeapWord* result = NULL;
  1177   // In a G1 heap, we're supposed to keep allocation from failing by
  1178   // incremental pauses.  Therefore, at least for now, we'll favor
  1179   // expansion over collection.  (This might change in the future if we can
  1180   // do something smarter than full collection to satisfy a failed alloc.)
  1182   result = expand_and_allocate(word_size);
  1183   if (result != NULL) {
  1184     assert(is_in(result), "result not in heap");
  1185     return result;
  1186   }
  1188   // OK, I guess we have to try collection.
  1190   do_collection(false, false, word_size);
  1192   result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  1194   if (result != NULL) {
  1195     assert(is_in(result), "result not in heap");
  1196     return result;
  1197   }
  1199   // Try collecting soft references.
  1200   do_collection(false, true, word_size);
  1201   result = attempt_allocation(word_size, /*permit_collection_pause*/false);
  1202   if (result != NULL) {
  1203     assert(is_in(result), "result not in heap");
  1204     return result;
  1205   }
  1207   // What else?  We might try synchronous finalization later.  If the total
  1208   // space available is large enough for the allocation, then a more
  1209   // complete compaction phase than we've tried so far might be
  1210   // appropriate.
  1211   return NULL;
  1212 }
  1214 // Attempting to expand the heap sufficiently
  1215 // to support an allocation of the given "word_size".  If
  1216 // successful, perform the allocation and return the address of the
  1217 // allocated block, or else "NULL".
  1219 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  1220   size_t expand_bytes = word_size * HeapWordSize;
  1221   if (expand_bytes < MinHeapDeltaBytes) {
  1222     expand_bytes = MinHeapDeltaBytes;
  1223   }
  1224   expand(expand_bytes);
  1225   assert(regions_accounted_for(), "Region leakage!");
  1226   HeapWord* result = attempt_allocation(word_size, false /* permit_collection_pause */);
  1227   return result;
  1228 }
  1230 size_t G1CollectedHeap::free_region_if_totally_empty(HeapRegion* hr) {
  1231   size_t pre_used = 0;
  1232   size_t cleared_h_regions = 0;
  1233   size_t freed_regions = 0;
  1234   UncleanRegionList local_list;
  1235   free_region_if_totally_empty_work(hr, pre_used, cleared_h_regions,
  1236                                     freed_regions, &local_list);
  1238   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  1239                           &local_list);
  1240   return pre_used;
  1241 }
  1243 void
  1244 G1CollectedHeap::free_region_if_totally_empty_work(HeapRegion* hr,
  1245                                                    size_t& pre_used,
  1246                                                    size_t& cleared_h,
  1247                                                    size_t& freed_regions,
  1248                                                    UncleanRegionList* list,
  1249                                                    bool par) {
  1250   assert(!hr->continuesHumongous(), "should have filtered these out");
  1251   size_t res = 0;
  1252   if (hr->used() > 0 && hr->garbage_bytes() == hr->used() &&
  1253       !hr->is_young()) {
  1254     if (G1PolicyVerbose > 0)
  1255       gclog_or_tty->print_cr("Freeing empty region "PTR_FORMAT "(" SIZE_FORMAT " bytes)"
  1256                                                                                " during cleanup", hr, hr->used());
  1257     free_region_work(hr, pre_used, cleared_h, freed_regions, list, par);
  1258   }
  1259 }
  1261 // FIXME: both this and shrink could probably be more efficient by
  1262 // doing one "VirtualSpace::expand_by" call rather than several.
  1263 void G1CollectedHeap::expand(size_t expand_bytes) {
  1264   size_t old_mem_size = _g1_storage.committed_size();
  1265   // We expand by a minimum of 1K.
  1266   expand_bytes = MAX2(expand_bytes, (size_t)K);
  1267   size_t aligned_expand_bytes =
  1268     ReservedSpace::page_align_size_up(expand_bytes);
  1269   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
  1270                                        HeapRegion::GrainBytes);
  1271   expand_bytes = aligned_expand_bytes;
  1272   while (expand_bytes > 0) {
  1273     HeapWord* base = (HeapWord*)_g1_storage.high();
  1274     // Commit more storage.
  1275     bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
  1276     if (!successful) {
  1277         expand_bytes = 0;
  1278     } else {
  1279       expand_bytes -= HeapRegion::GrainBytes;
  1280       // Expand the committed region.
  1281       HeapWord* high = (HeapWord*) _g1_storage.high();
  1282       _g1_committed.set_end(high);
  1283       // Create a new HeapRegion.
  1284       MemRegion mr(base, high);
  1285       bool is_zeroed = !_g1_max_committed.contains(base);
  1286       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
  1288       // Now update max_committed if necessary.
  1289       _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
  1291       // Add it to the HeapRegionSeq.
  1292       _hrs->insert(hr);
  1293       // Set the zero-fill state, according to whether it's already
  1294       // zeroed.
  1295       {
  1296         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  1297         if (is_zeroed) {
  1298           hr->set_zero_fill_complete();
  1299           put_free_region_on_list_locked(hr);
  1300         } else {
  1301           hr->set_zero_fill_needed();
  1302           put_region_on_unclean_list_locked(hr);
  1303         }
  1304       }
  1305       _free_regions++;
  1306       // And we used up an expansion region to create it.
  1307       _expansion_regions--;
  1308       // Tell the cardtable about it.
  1309       Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1310       // And the offset table as well.
  1311       _bot_shared->resize(_g1_committed.word_size());
  1312     }
  1313   }
  1314   if (Verbose && PrintGC) {
  1315     size_t new_mem_size = _g1_storage.committed_size();
  1316     gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
  1317                            old_mem_size/K, aligned_expand_bytes/K,
  1318                            new_mem_size/K);
  1319   }
  1320 }
  1322 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
  1323 {
  1324   size_t old_mem_size = _g1_storage.committed_size();
  1325   size_t aligned_shrink_bytes =
  1326     ReservedSpace::page_align_size_down(shrink_bytes);
  1327   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
  1328                                          HeapRegion::GrainBytes);
  1329   size_t num_regions_deleted = 0;
  1330   MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
  1332   assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1333   if (mr.byte_size() > 0)
  1334     _g1_storage.shrink_by(mr.byte_size());
  1335   assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
  1337   _g1_committed.set_end(mr.start());
  1338   _free_regions -= num_regions_deleted;
  1339   _expansion_regions += num_regions_deleted;
  1341   // Tell the cardtable about it.
  1342   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
  1344   // And the offset table as well.
  1345   _bot_shared->resize(_g1_committed.word_size());
  1347   HeapRegionRemSet::shrink_heap(n_regions());
  1349   if (Verbose && PrintGC) {
  1350     size_t new_mem_size = _g1_storage.committed_size();
  1351     gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
  1352                            old_mem_size/K, aligned_shrink_bytes/K,
  1353                            new_mem_size/K);
  1354   }
  1355 }
  1357 void G1CollectedHeap::shrink(size_t shrink_bytes) {
  1358   release_gc_alloc_regions(true /* totally */);
  1359   tear_down_region_lists();  // We will rebuild them in a moment.
  1360   shrink_helper(shrink_bytes);
  1361   rebuild_region_lists();
  1362 }
  1364 // Public methods.
  1366 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
  1367 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  1368 #endif // _MSC_VER
  1371 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  1372   SharedHeap(policy_),
  1373   _g1_policy(policy_),
  1374   _ref_processor(NULL),
  1375   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  1376   _bot_shared(NULL),
  1377   _par_alloc_during_gc_lock(Mutex::leaf, "par alloc during GC lock"),
  1378   _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
  1379   _evac_failure_scan_stack(NULL) ,
  1380   _mark_in_progress(false),
  1381   _cg1r(NULL), _czft(NULL), _summary_bytes_used(0),
  1382   _cur_alloc_region(NULL),
  1383   _refine_cte_cl(NULL),
  1384   _free_region_list(NULL), _free_region_list_size(0),
  1385   _free_regions(0),
  1386   _full_collection(false),
  1387   _unclean_region_list(),
  1388   _unclean_regions_coming(false),
  1389   _young_list(new YoungList(this)),
  1390   _gc_time_stamp(0),
  1391   _surviving_young_words(NULL),
  1392   _in_cset_fast_test(NULL),
  1393   _in_cset_fast_test_base(NULL),
  1394   _dirty_cards_region_list(NULL) {
  1395   _g1h = this; // To catch bugs.
  1396   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  1397     vm_exit_during_initialization("Failed necessary allocation.");
  1398   }
  1399   int n_queues = MAX2((int)ParallelGCThreads, 1);
  1400   _task_queues = new RefToScanQueueSet(n_queues);
  1402   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  1403   assert(n_rem_sets > 0, "Invariant.");
  1405   HeapRegionRemSetIterator** iter_arr =
  1406     NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues);
  1407   for (int i = 0; i < n_queues; i++) {
  1408     iter_arr[i] = new HeapRegionRemSetIterator();
  1410   _rem_set_iterator = iter_arr;
  1412   for (int i = 0; i < n_queues; i++) {
  1413     RefToScanQueue* q = new RefToScanQueue();
  1414     q->initialize();
  1415     _task_queues->register_queue(i, q);
  1418   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  1419     _gc_alloc_regions[ap]          = NULL;
  1420     _gc_alloc_region_counts[ap]    = 0;
  1421     _retained_gc_alloc_regions[ap] = NULL;
  1422     // by default, we do not retain a GC alloc region for each ap;
  1423     // we'll override this, when appropriate, below
  1424     _retain_gc_alloc_region[ap]    = false;
  1427   // We will try to remember the last half-full tenured region we
  1428   // allocated to at the end of a collection so that we can re-use it
  1429   // during the next collection.
  1430   _retain_gc_alloc_region[GCAllocForTenured]  = true;
  1432   guarantee(_task_queues != NULL, "task_queues allocation failure.");
  1435 jint G1CollectedHeap::initialize() {
  1436   os::enable_vtime();
  1438   // Necessary to satisfy locking discipline assertions.
  1440   MutexLocker x(Heap_lock);
  1442   // While there are no constraints in the GC code that HeapWordSize
  1443   // be any particular value, there are multiple other areas in the
  1444   // system which believe this to be true (e.g. oop->object_size in some
  1445   // cases incorrectly returns the size in wordSize units rather than
  1446   // HeapWordSize).
  1447   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  1449   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  1450   size_t max_byte_size = collector_policy()->max_heap_byte_size();
  1452   // Ensure that the sizes are properly aligned.
  1453   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
  1454   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
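         // (Illustration, not from this file: assuming the 1M default for
         // HeapRegion::GrainBytes, an initial/max heap pair such as 64M/512M
         // passes these checks, while a size like 64M+100K would fail the
         // alignment guarantee above.)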
  1456   // We allocate this in any case, but it does no work if the command line
  1457   // param is off.
  1458   _cg1r = new ConcurrentG1Refine();
  1460   // Reserve the maximum.
  1461   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
  1462   // Includes the perm-gen.
  1464   const size_t total_reserved = max_byte_size + pgs->max_size();
  1465   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
  1467   ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
  1468                         HeapRegion::GrainBytes,
  1469                         false /*ism*/, addr);
  1471   if (UseCompressedOops) {
  1472     if (addr != NULL && !heap_rs.is_reserved()) {
  1473       // Failed to reserve at specified address - the requested memory
  1474       // region is taken already, for example, by 'java' launcher.
  1475       // Try again to reserve the heap higher.
  1476       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
  1477       ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
  1478                              false /*ism*/, addr);
  1479       if (addr != NULL && !heap_rs0.is_reserved()) {
  1480         // Failed to reserve at specified address again - give up.
  1481         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
  1482         assert(addr == NULL, "");
  1483         ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
  1484                                false /*ism*/, addr);
  1485         heap_rs = heap_rs1;
  1486       } else {
  1487         heap_rs = heap_rs0;
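         // (Summary of the fallback chain above: first try an address that
         // permits unscaled compressed oops, then one that permits zero-based
         // compressed oops, and finally accept any address (heap-based
         // compressed oops); each step runs only if the previous reservation
         // failed.)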
  1492   if (!heap_rs.is_reserved()) {
  1493     vm_exit_during_initialization("Could not reserve enough space for object heap");
  1494     return JNI_ENOMEM;
  1497   // It is important to do this in a way such that concurrent readers can't
  1498   // temporarily think something is in the heap.  (I've actually seen this
  1499   // happen in asserts: DLD.)
  1500   _reserved.set_word_size(0);
  1501   _reserved.set_start((HeapWord*)heap_rs.base());
  1502   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
  1504   _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
  1506   _num_humongous_regions = 0;
  1508   // Create the gen rem set (and barrier set) for the entire reserved region.
  1509   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
  1510   set_barrier_set(rem_set()->bs());
  1511   if (barrier_set()->is_a(BarrierSet::ModRef)) {
  1512     _mr_bs = (ModRefBarrierSet*)_barrier_set;
  1513   } else {
  1514     vm_exit_during_initialization("G1 requires a mod ref bs.");
  1515     return JNI_ENOMEM;
  1518   // Also create a G1 rem set.
  1519   if (G1UseHRIntoRS) {
  1520     if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
  1521       _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
  1522     } else {
  1523       vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
  1524       return JNI_ENOMEM;
  1526   } else {
  1527     _g1_rem_set = new StupidG1RemSet(this);
  1530   // Carve out the G1 part of the heap.
  1532   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
  1533   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
  1534                            g1_rs.size()/HeapWordSize);
  1535   ReservedSpace perm_gen_rs = heap_rs.last_part(max_byte_size);
  1537   _perm_gen = pgs->init(perm_gen_rs, pgs->init_size(), rem_set());
  1539   _g1_storage.initialize(g1_rs, 0);
  1540   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
  1541   _g1_max_committed = _g1_committed;
  1542   _hrs = new HeapRegionSeq(_expansion_regions);
  1543   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
  1544   guarantee(_cur_alloc_region == NULL, "from constructor");
  1546   // 6843694 - ensure that the maximum region index can fit
  1547   // in the remembered set structures.
  1548   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  1549   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
  1551   const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
  1552   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  1553   guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
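         // (Back-of-the-envelope for the two guarantees above, under assumed
         // values -- the real typedefs and constants live in other files: with
         // 16-bit RegionIdx_t/CardIdx_t, max_region_idx and
         // max_cards_per_region are both 2^15 - 1 = 32767; with 1M regions and
         // 512-byte cards, cards_per_region is 1M / 512 = 2048, comfortably
         // under the limit.)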
  1555   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
  1556                                              heap_word_size(init_byte_size));
  1558   _g1h = this;
  1560   // Create the ConcurrentMark data structure and thread.
  1561   // (Must do this late, so that "max_regions" is defined.)
  1562   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
  1563   _cmThread = _cm->cmThread();
  1565   // ...and the concurrent zero-fill thread, if necessary.
  1566   if (G1ConcZeroFill) {
  1567     _czft = new ConcurrentZFThread();
  1570   // Initialize the from_card cache structure of HeapRegionRemSet.
  1571   HeapRegionRemSet::init_heap(max_regions());
  1573   // Now expand into the initial heap size.
  1574   expand(init_byte_size);
  1576   // Perform any initialization actions delegated to the policy.
  1577   g1_policy()->init();
  1579   g1_policy()->note_start_of_mark_thread();
  1581   _refine_cte_cl =
  1582     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
  1583                                     g1_rem_set(),
  1584                                     concurrent_g1_refine());
  1585   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  1587   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
  1588                                                SATB_Q_FL_lock,
  1589                                                0,
  1590                                                Shared_SATB_Q_lock);
  1592   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1593                                                 DirtyCardQ_FL_lock,
  1594                                                 G1UpdateBufferQueueMaxLength,
  1595                                                 Shared_DirtyCardQ_lock);
  1597   if (G1DeferredRSUpdate) {
  1598     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
  1599                                       DirtyCardQ_FL_lock,
  1600                                       0,
  1601                                       Shared_DirtyCardQ_lock,
  1602                                       &JavaThread::dirty_card_queue_set());
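         // (When G1DeferredRSUpdate is enabled, remembered-set updates are
         // deferred through this second dirty card queue set; the last
         // constructor argument presumably lets it share buffers with the
         // per-thread JavaThread set instead of keeping a separate free list.)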
  1604   // In case we're keeping closure specialization stats, initialize those
  1605   // counts and that mechanism.
  1606   SpecializationStats::clear();
  1608   _gc_alloc_region_list = NULL;
  1610   // Do later initialization work for concurrent refinement.
  1611   _cg1r->init();
  1613   const char* group_names[] = { "CR", "ZF", "CM", "CL" };
  1614   GCOverheadReporter::initGCOverheadReporter(4, group_names);
  1616   return JNI_OK;
  1619 void G1CollectedHeap::ref_processing_init() {
  1620   SharedHeap::ref_processing_init();
  1621   MemRegion mr = reserved_region();
  1622   _ref_processor = ReferenceProcessor::create_ref_processor(
  1623                                          mr,    // span
  1624                                          false, // Reference discovery is not atomic
  1625                                                 // (though it shouldn't matter here.)
  1626                                          true,  // mt_discovery
  1627                                          NULL,  // is alive closure: need to fill this in for efficiency
  1628                                          ParallelGCThreads,
  1629                                          ParallelRefProcEnabled,
  1630                                          true); // Setting next fields of discovered
  1631                                                 // lists requires a barrier.
  1634 size_t G1CollectedHeap::capacity() const {
  1635   return _g1_committed.byte_size();
  1638 void G1CollectedHeap::iterate_dirty_card_closure(bool concurrent,
  1639                                                  int worker_i) {
  1640   // Clean cards in the hot card cache
  1641   concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set());
  1643   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1644   int n_completed_buffers = 0;
  1645   while (dcqs.apply_closure_to_completed_buffer(worker_i, 0, true)) {
  1646     n_completed_buffers++;
  1648   g1_policy()->record_update_rs_processed_buffers(worker_i,
  1649                                                   (double) n_completed_buffers);
  1650   dcqs.clear_n_completed_buffers();
  1651   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  1655 // Computes the sum of the storage used by the various regions.
  1657 size_t G1CollectedHeap::used() const {
  1658   assert(Heap_lock->owner() != NULL,
  1659          "Should be owned on this thread's behalf.");
  1660   size_t result = _summary_bytes_used;
  1661   // Read only once in case it is set to NULL concurrently
  1662   HeapRegion* hr = _cur_alloc_region;
  1663   if (hr != NULL)
  1664     result += hr->used();
  1665   return result;
  1668 size_t G1CollectedHeap::used_unlocked() const {
  1669   size_t result = _summary_bytes_used;
  1670   return result;
  1673 class SumUsedClosure: public HeapRegionClosure {
  1674   size_t _used;
  1675 public:
  1676   SumUsedClosure() : _used(0) {}
  1677   bool doHeapRegion(HeapRegion* r) {
  1678     if (!r->continuesHumongous()) {
  1679       _used += r->used();
  1681     return false;
  1683   size_t result() { return _used; }
  1684 };
  1686 size_t G1CollectedHeap::recalculate_used() const {
  1687   SumUsedClosure blk;
  1688   _hrs->iterate(&blk);
  1689   return blk.result();
  1692 #ifndef PRODUCT
  1693 class SumUsedRegionsClosure: public HeapRegionClosure {
  1694   size_t _num;
  1695 public:
  1696   SumUsedRegionsClosure() : _num(0) {}
  1697   bool doHeapRegion(HeapRegion* r) {
  1698     if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
  1699       _num += 1;
  1701     return false;
  1703   size_t result() { return _num; }
  1704 };
  1706 size_t G1CollectedHeap::recalculate_used_regions() const {
  1707   SumUsedRegionsClosure blk;
  1708   _hrs->iterate(&blk);
  1709   return blk.result();
  1711 #endif // PRODUCT
  1713 size_t G1CollectedHeap::unsafe_max_alloc() {
  1714   if (_free_regions > 0) return HeapRegion::GrainBytes;
  1715   // otherwise, is there space in the current allocation region?
  1717   // We need to store the current allocation region in a local variable
  1718   // here. The problem is that this method doesn't take any locks and
  1719   // there may be other threads which overwrite the current allocation
  1720   // region field. attempt_allocation(), for example, sets it to NULL
  1721   // and this can happen *after* the NULL check here but before the call
  1722   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
  1723   // to be a problem in the optimized build, since the two loads of the
  1724   // current allocation region field are optimized away.
  1725   HeapRegion* car = _cur_alloc_region;
  1727   // FIXME: should iterate over all regions?
  1728   if (car == NULL) {
  1729     return 0;
  1731   return car->free();
  1734 void G1CollectedHeap::collect(GCCause::Cause cause) {
  1735   // The caller doesn't have the Heap_lock
  1736   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  1737   MutexLocker ml(Heap_lock);
  1738   collect_locked(cause);
  1741 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  1742   assert(Thread::current()->is_VM_thread(), "Precondition#1");
  1743   assert(Heap_lock->is_locked(), "Precondition#2");
  1744   GCCauseSetter gcs(this, cause);
  1745   switch (cause) {
  1746     case GCCause::_heap_inspection:
  1747     case GCCause::_heap_dump: {
  1748       HandleMark hm;
  1749       do_full_collection(false);         // don't clear all soft refs
  1750       break;
  1752     default: // XXX FIX ME
  1753       ShouldNotReachHere(); // Unexpected use of this function
  1758 void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
  1759   // Don't want to do a GC until cleanup is completed.
  1760   wait_for_cleanup_complete();
  1762   // Read the GC count while holding the Heap_lock
  1763   int gc_count_before = SharedHeap::heap()->total_collections();
  1765     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
  1766     VM_G1CollectFull op(gc_count_before, cause);
  1767     VMThread::execute(&op);
  1771 bool G1CollectedHeap::is_in(const void* p) const {
  1772   if (_g1_committed.contains(p)) {
  1773     HeapRegion* hr = _hrs->addr_to_region(p);
  1774     return hr->is_in(p);
  1775   } else {
  1776     return _perm_gen->as_gen()->is_in(p);
  1780 // Iteration functions.
  1782 // Iterates an OopClosure over all ref-containing fields of objects
  1783 // within a HeapRegion.
  1785 class IterateOopClosureRegionClosure: public HeapRegionClosure {
  1786   MemRegion _mr;
  1787   OopClosure* _cl;
  1788 public:
  1789   IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  1790     : _mr(mr), _cl(cl) {}
  1791   bool doHeapRegion(HeapRegion* r) {
  1792     if (! r->continuesHumongous()) {
  1793       r->oop_iterate(_cl);
  1795     return false;
  1797 };
  1799 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
  1800   IterateOopClosureRegionClosure blk(_g1_committed, cl);
  1801   _hrs->iterate(&blk);
  1802   if (do_perm) {
  1803     perm_gen()->oop_iterate(cl);
  1807 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
  1808   IterateOopClosureRegionClosure blk(mr, cl);
  1809   _hrs->iterate(&blk);
  1810   if (do_perm) {
  1811     perm_gen()->oop_iterate(cl);
  1815 // Iterates an ObjectClosure over all objects within a HeapRegion.
  1817 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  1818   ObjectClosure* _cl;
  1819 public:
  1820   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  1821   bool doHeapRegion(HeapRegion* r) {
  1822     if (! r->continuesHumongous()) {
  1823       r->object_iterate(_cl);
  1825     return false;
  1827 };
  1829 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
  1830   IterateObjectClosureRegionClosure blk(cl);
  1831   _hrs->iterate(&blk);
  1832   if (do_perm) {
  1833     perm_gen()->object_iterate(cl);
  1837 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  1838   // FIXME: is this right?
  1839   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
  1842 // Calls a SpaceClosure on a HeapRegion.
  1844 class SpaceClosureRegionClosure: public HeapRegionClosure {
  1845   SpaceClosure* _cl;
  1846 public:
  1847   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  1848   bool doHeapRegion(HeapRegion* r) {
  1849     _cl->do_space(r);
  1850     return false;
  1852 };
  1854 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
  1855   SpaceClosureRegionClosure blk(cl);
  1856   _hrs->iterate(&blk);
  1859 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
  1860   _hrs->iterate(cl);
  1863 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  1864                                                HeapRegionClosure* cl) {
  1865   _hrs->iterate_from(r, cl);
  1868 void
  1869 G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
  1870   _hrs->iterate_from(idx, cl);
  1873 HeapRegion* G1CollectedHeap::region_at(size_t idx) { return _hrs->at(idx); }
  1875 void
  1876 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  1877                                                  int worker,
  1878                                                  jint claim_value) {
  1879   const size_t regions = n_regions();
  1880   const size_t worker_num = (ParallelGCThreads > 0 ? ParallelGCThreads : 1);
  1881   // try to spread out the starting points of the workers
  1882   const size_t start_index = regions / worker_num * (size_t) worker;
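         // (Illustrative numbers only: with 1000 regions and 8 workers,
         // worker 3 starts at region 1000 / 8 * 3 = 375 and wraps around, so
         // every worker still visits all 1000 regions.)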
  1884   // each worker will actually look at all regions
  1885   for (size_t count = 0; count < regions; ++count) {
  1886     const size_t index = (start_index + count) % regions;
  1887     assert(0 <= index && index < regions, "sanity");
  1888     HeapRegion* r = region_at(index);
  1889     // we'll ignore "continues humongous" regions (we'll process them
  1890     // when we come across their corresponding "start humongous"
  1891     // region) and regions already claimed
  1892     if (r->claim_value() == claim_value || r->continuesHumongous()) {
  1893       continue;
  1895     // OK, try to claim it
  1896     if (r->claimHeapRegion(claim_value)) {
  1897       // success!
  1898       assert(!r->continuesHumongous(), "sanity");
  1899       if (r->startsHumongous()) {
  1900         // If the region is "starts humongous" we'll iterate over its
  1901         // "continues humongous" regions; in fact, we'll do them
  1902         // first. The order is important: in one case, calling the
  1903         // closure on the "starts humongous" region might de-allocate
  1904         // and clear all its "continues humongous" regions and, as a
  1905         // result, we might end up processing them twice. So, we'll do
  1906         // them first (notice: most closures will ignore them anyway) and
  1907         // then we'll do the "starts humongous" region.
  1908         for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
  1909           HeapRegion* chr = region_at(ch_index);
  1911           // if the region has already been claimed or it's not
  1912           // "continues humongous" we're done
  1913           if (chr->claim_value() == claim_value ||
  1914               !chr->continuesHumongous()) {
  1915             break;
  1918           // No one should have claimed it directly, given
  1919           // that we claimed its "starts humongous" region.
  1920           assert(chr->claim_value() != claim_value, "sanity");
  1921           assert(chr->humongous_start_region() == r, "sanity");
  1923           if (chr->claimHeapRegion(claim_value)) {
  1924             // we should always be able to claim it; no one else should
  1925             // be trying to claim this region
  1927             bool res2 = cl->doHeapRegion(chr);
  1928             assert(!res2, "Should not abort");
  1930             // Right now, this holds (i.e., no closure that actually
  1931             // does something with "continues humongous" regions
  1932             // clears them). We might have to weaken it in the future,
  1933             // but let's leave these two asserts here for extra safety.
  1934             assert(chr->continuesHumongous(), "should still be the case");
  1935             assert(chr->humongous_start_region() == r, "sanity");
  1936           } else {
  1937             guarantee(false, "we should not reach here");
  1942       assert(!r->continuesHumongous(), "sanity");
  1943       bool res = cl->doHeapRegion(r);
  1944       assert(!res, "Should not abort");
  1949 class ResetClaimValuesClosure: public HeapRegionClosure {
  1950 public:
  1951   bool doHeapRegion(HeapRegion* r) {
  1952     r->set_claim_value(HeapRegion::InitialClaimValue);
  1953     return false;
  1955 };
  1957 void
  1958 G1CollectedHeap::reset_heap_region_claim_values() {
  1959   ResetClaimValuesClosure blk;
  1960   heap_region_iterate(&blk);
  1963 #ifdef ASSERT
  1964 // This checks whether all regions in the heap have the correct claim
  1965 // value. It also piggy-backs a check to ensure that the
  1966 // humongous_start_region() information on "continues humongous"
  1967 // regions is correct.
  1969 class CheckClaimValuesClosure : public HeapRegionClosure {
  1970 private:
  1971   jint _claim_value;
  1972   size_t _failures;
  1973   HeapRegion* _sh_region;
  1974 public:
  1975   CheckClaimValuesClosure(jint claim_value) :
  1976     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
  1977   bool doHeapRegion(HeapRegion* r) {
  1978     if (r->claim_value() != _claim_value) {
  1979       gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  1980                              "claim value = %d, should be %d",
  1981                              r->bottom(), r->end(), r->claim_value(),
  1982                              _claim_value);
  1983       ++_failures;
  1985     if (!r->isHumongous()) {
  1986       _sh_region = NULL;
  1987     } else if (r->startsHumongous()) {
  1988       _sh_region = r;
  1989     } else if (r->continuesHumongous()) {
  1990       if (r->humongous_start_region() != _sh_region) {
  1991         gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
  1992                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
  1993                                r->bottom(), r->end(),
  1994                                r->humongous_start_region(),
  1995                                _sh_region);
  1996         ++_failures;
  1999     return false;
  2001   size_t failures() {
  2002     return _failures;
  2004 };
  2006 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
  2007   CheckClaimValuesClosure cl(claim_value);
  2008   heap_region_iterate(&cl);
  2009   return cl.failures() == 0;
  2011 #endif // ASSERT
  2013 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  2014   HeapRegion* r = g1_policy()->collection_set();
  2015   while (r != NULL) {
  2016     HeapRegion* next = r->next_in_collection_set();
  2017     if (cl->doHeapRegion(r)) {
  2018       cl->incomplete();
  2019       return;
  2021     r = next;
  2025 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
  2026                                                   HeapRegionClosure *cl) {
  2027   assert(r->in_collection_set(),
  2028          "Start region must be a member of the collection set.");
  2029   HeapRegion* cur = r;
  2030   while (cur != NULL) {
  2031     HeapRegion* next = cur->next_in_collection_set();
  2032     if (cl->doHeapRegion(cur) && false) {
  2033       cl->incomplete();
  2034       return;
  2036     cur = next;
  2038   cur = g1_policy()->collection_set();
  2039   while (cur != r) {
  2040     HeapRegion* next = cur->next_in_collection_set();
  2041     if (cl->doHeapRegion(cur) && false) {
  2042       cl->incomplete();
  2043       return;
  2045     cur = next;
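         // (Note: the "&& false" in the two loops above makes the early-exit
         // branch unreachable, so the closure's return value is effectively
         // ignored on this path.)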
  2049 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
  2050   return _hrs->length() > 0 ? _hrs->at(0) : NULL;
  2054 Space* G1CollectedHeap::space_containing(const void* addr) const {
  2055   Space* res = heap_region_containing(addr);
  2056   if (res == NULL)
  2057     res = perm_gen()->space_containing(addr);
  2058   return res;
  2061 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
  2062   Space* sp = space_containing(addr);
  2063   if (sp != NULL) {
  2064     return sp->block_start(addr);
  2066   return NULL;
  2069 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
  2070   Space* sp = space_containing(addr);
  2071   assert(sp != NULL, "block_size of address outside of heap");
  2072   return sp->block_size(addr);
  2075 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  2076   Space* sp = space_containing(addr);
  2077   return sp->block_is_obj(addr);
  2080 bool G1CollectedHeap::supports_tlab_allocation() const {
  2081   return true;
  2084 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  2085   return HeapRegion::GrainBytes;
  2088 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  2089   // Return the remaining space in the cur alloc region, but not less than
  2090   // the min TLAB size.
  2091   // Also, no more than half the region size, since we can't allow tlabs to
  2092   // grow big enough to accommodate humongous objects.
  2094   // We need to store it locally, since it might change between when we
  2095   // test for NULL and when we use it later.
  2096   ContiguousSpace* cur_alloc_space = _cur_alloc_region;
  2097   if (cur_alloc_space == NULL) {
  2098     return HeapRegion::GrainBytes/2;
  2099   } else {
  2100     return MAX2(MIN2(cur_alloc_space->free(),
  2101                      (size_t)(HeapRegion::GrainBytes/2)),
  2102                 (size_t)MinTLABSize);
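         // (Illustration, assuming 1M regions: with no current alloc region
         // this returns 512K; otherwise the region's free space, clamped to at
         // most 512K and to at least MinTLABSize.)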
  2106 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
  2107   bool dummy;
  2108   return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
  2111 bool G1CollectedHeap::allocs_are_zero_filled() {
  2112   return false;
  2115 size_t G1CollectedHeap::large_typearray_limit() {
  2116   // FIXME
  2117   return HeapRegion::GrainBytes/HeapWordSize;
  2120 size_t G1CollectedHeap::max_capacity() const {
  2121   return _g1_committed.byte_size();
  2124 jlong G1CollectedHeap::millis_since_last_gc() {
  2125   // assert(false, "NYI");
  2126   return 0;
  2130 void G1CollectedHeap::prepare_for_verify() {
  2131   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2132     ensure_parsability(false);
  2134   g1_rem_set()->prepare_for_verify();
  2137 class VerifyLivenessOopClosure: public OopClosure {
  2138   G1CollectedHeap* g1h;
  2139 public:
  2140   VerifyLivenessOopClosure(G1CollectedHeap* _g1h) {
  2141     g1h = _g1h;
  2143   void do_oop(narrowOop *p) { do_oop_work(p); }
  2144   void do_oop(      oop *p) { do_oop_work(p); }
  2146   template <class T> void do_oop_work(T *p) {
  2147     oop obj = oopDesc::load_decode_heap_oop(p);
  2148     guarantee(obj == NULL || !g1h->is_obj_dead(obj),
  2149               "Dead object referenced by a not dead object");
  2151 };
  2153 class VerifyObjsInRegionClosure: public ObjectClosure {
  2154 private:
  2155   G1CollectedHeap* _g1h;
  2156   size_t _live_bytes;
  2157   HeapRegion *_hr;
  2158   bool _use_prev_marking;
  2159 public:
  2160   // use_prev_marking == true  -> use "prev" marking information,
  2161   // use_prev_marking == false -> use "next" marking information
  2162   VerifyObjsInRegionClosure(HeapRegion *hr, bool use_prev_marking)
  2163     : _live_bytes(0), _hr(hr), _use_prev_marking(use_prev_marking) {
  2164     _g1h = G1CollectedHeap::heap();
  2166   void do_object(oop o) {
  2167     VerifyLivenessOopClosure isLive(_g1h);
  2168     assert(o != NULL, "Huh?");
  2169     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
  2170       o->oop_iterate(&isLive);
  2171       if (!_hr->obj_allocated_since_prev_marking(o))
  2172         _live_bytes += (o->size() * HeapWordSize);
  2175   size_t live_bytes() { return _live_bytes; }
  2176 };
  2178 class PrintObjsInRegionClosure : public ObjectClosure {
  2179   HeapRegion *_hr;
  2180   G1CollectedHeap *_g1;
  2181 public:
  2182   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
  2183     _g1 = G1CollectedHeap::heap();
  2184   };
  2186   void do_object(oop o) {
  2187     if (o != NULL) {
  2188       HeapWord *start = (HeapWord *) o;
  2189       size_t word_sz = o->size();
  2190       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
  2191                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
  2192                           (void*) o, word_sz,
  2193                           _g1->isMarkedPrev(o),
  2194                           _g1->isMarkedNext(o),
  2195                           _hr->obj_allocated_since_prev_marking(o));
  2196       HeapWord *end = start + word_sz;
  2197       HeapWord *cur;
  2198       int *val;
  2199       for (cur = start; cur < end; cur++) {
  2200         val = (int *) cur;
  2201         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
  2205 };
  2207 class VerifyRegionClosure: public HeapRegionClosure {
  2208 private:
  2209   bool _allow_dirty;
  2210   bool _par;
  2211   bool _use_prev_marking;
  2212 public:
  2213   // use_prev_marking == true  -> use "prev" marking information,
  2214   // use_prev_marking == false -> use "next" marking information
  2215   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
  2216     : _allow_dirty(allow_dirty),
  2217       _par(par),
  2218       _use_prev_marking(use_prev_marking) {}
  2220   bool doHeapRegion(HeapRegion* r) {
  2221     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  2222               "Should be unclaimed at verify points.");
  2223     if (!r->continuesHumongous()) {
  2224       VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
  2225       r->verify(_allow_dirty, _use_prev_marking);
  2226       r->object_iterate(&not_dead_yet_cl);
  2227       guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
  2228                 "More live objects than counted in last complete marking.");
  2230     return false;
  2232 };
  2234 class VerifyRootsClosure: public OopsInGenClosure {
  2235 private:
  2236   G1CollectedHeap* _g1h;
  2237   bool             _failures;
  2238   bool             _use_prev_marking;
  2239 public:
  2240   // use_prev_marking == true  -> use "prev" marking information,
  2241   // use_prev_marking == false -> use "next" marking information
  2242   VerifyRootsClosure(bool use_prev_marking) :
  2243     _g1h(G1CollectedHeap::heap()),
  2244     _failures(false),
  2245     _use_prev_marking(use_prev_marking) { }
  2247   bool failures() { return _failures; }
  2249   template <class T> void do_oop_nv(T* p) {
  2250     T heap_oop = oopDesc::load_heap_oop(p);
  2251     if (!oopDesc::is_null(heap_oop)) {
  2252       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2253       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
  2254         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
  2255                                "points to dead obj "PTR_FORMAT, p, (void*) obj);
  2256         obj->print_on(gclog_or_tty);
  2257         _failures = true;
  2262   void do_oop(oop* p)       { do_oop_nv(p); }
  2263   void do_oop(narrowOop* p) { do_oop_nv(p); }
  2264 };
  2266 // This is the task used for parallel heap verification.
  2268 class G1ParVerifyTask: public AbstractGangTask {
  2269 private:
  2270   G1CollectedHeap* _g1h;
  2271   bool _allow_dirty;
  2272   bool _use_prev_marking;
  2274 public:
  2275   // use_prev_marking == true  -> use "prev" marking information,
  2276   // use_prev_marking == false -> use "next" marking information
  2277   G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty,
  2278                   bool use_prev_marking) :
  2279     AbstractGangTask("Parallel verify task"),
  2280     _g1h(g1h),
  2281     _allow_dirty(allow_dirty),
  2282     _use_prev_marking(use_prev_marking) { }
  2284   void work(int worker_i) {
  2285     HandleMark hm;
  2286     VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
  2287     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
  2288                                           HeapRegion::ParVerifyClaimValue);
  2290 };
  2292 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
  2293   verify(allow_dirty, silent, /* use_prev_marking */ true);
  2296 void G1CollectedHeap::verify(bool allow_dirty,
  2297                              bool silent,
  2298                              bool use_prev_marking) {
  2299   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
  2300     if (!silent) { gclog_or_tty->print("roots "); }
  2301     VerifyRootsClosure rootsCl(use_prev_marking);
  2302     process_strong_roots(false,
  2303                          SharedHeap::SO_AllClasses,
  2304                          &rootsCl,
  2305                          &rootsCl);
  2306     rem_set()->invalidate(perm_gen()->used_region(), false);
  2307     if (!silent) { gclog_or_tty->print("heapRegions "); }
  2308     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
  2309       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2310              "sanity check");
  2312       G1ParVerifyTask task(this, allow_dirty, use_prev_marking);
  2313       int n_workers = workers()->total_workers();
  2314       set_par_threads(n_workers);
  2315       workers()->run_task(&task);
  2316       set_par_threads(0);
  2318       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
  2319              "sanity check");
  2321       reset_heap_region_claim_values();
  2323       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2324              "sanity check");
  2325     } else {
  2326       VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
  2327       _hrs->iterate(&blk);
  2329     if (!silent) gclog_or_tty->print("remset ");
  2330     rem_set()->verify();
  2331     guarantee(!rootsCl.failures(), "should not have had failures");
  2332   } else {
  2333     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  2337 class PrintRegionClosure: public HeapRegionClosure {
  2338   outputStream* _st;
  2339 public:
  2340   PrintRegionClosure(outputStream* st) : _st(st) {}
  2341   bool doHeapRegion(HeapRegion* r) {
  2342     r->print_on(_st);
  2343     return false;
  2345 };
  2347 void G1CollectedHeap::print() const { print_on(tty); }
  2349 void G1CollectedHeap::print_on(outputStream* st) const {
  2350   print_on(st, PrintHeapAtGCExtended);
  2353 void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
  2354   st->print(" %-20s", "garbage-first heap");
  2355   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
  2356             capacity()/K, used_unlocked()/K);
  2357   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
  2358             _g1_storage.low_boundary(),
  2359             _g1_storage.high(),
  2360             _g1_storage.high_boundary());
  2361   st->cr();
  2362   st->print("  region size " SIZE_FORMAT "K, ",
  2363             HeapRegion::GrainBytes/K);
  2364   size_t young_regions = _young_list->length();
  2365   st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
  2366             young_regions, young_regions * HeapRegion::GrainBytes / K);
  2367   size_t survivor_regions = g1_policy()->recorded_survivor_regions();
  2368   st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
  2369             survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
  2370   st->cr();
  2371   perm()->as_gen()->print_on(st);
  2372   if (extended) {
  2373     print_on_extended(st);
  2377 void G1CollectedHeap::print_on_extended(outputStream* st) const {
  2378   PrintRegionClosure blk(st);
  2379   _hrs->iterate(&blk);
  2382 class PrintOnThreadsClosure : public ThreadClosure {
  2383   outputStream* _st;
  2384 public:
  2385   PrintOnThreadsClosure(outputStream* st) : _st(st) { }
  2386   virtual void do_thread(Thread *t) {
  2387     t->print_on(_st);
  2389 };
  2391 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  2392   if (ParallelGCThreads > 0) {
  2393     workers()->print_worker_threads();
  2395   st->print("\"G1 concurrent mark GC Thread\" ");
  2396   _cmThread->print();
  2397   st->cr();
  2398   st->print("\"G1 concurrent refinement GC Threads\" ");
  2399   PrintOnThreadsClosure p(st);
  2400   _cg1r->threads_do(&p);
  2401   st->cr();
  2402   st->print("\"G1 zero-fill GC Thread\" ");
  2403   _czft->print_on(st);
  2404   st->cr();
  2407 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  2408   if (ParallelGCThreads > 0) {
  2409     workers()->threads_do(tc);
  2411   tc->do_thread(_cmThread);
  2412   _cg1r->threads_do(tc);
  2413   tc->do_thread(_czft);
  2416 void G1CollectedHeap::print_tracing_info() const {
  2417   concurrent_g1_refine()->print_final_card_counts();
  2419   // We'll overload this to mean "trace GC pause statistics."
  2420   if (TraceGen0Time || TraceGen1Time) {
  2421     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
  2422     // to that.
  2423     g1_policy()->print_tracing_info();
  2425   if (G1SummarizeRSetStats) {
  2426     g1_rem_set()->print_summary_info();
  2428   if (G1SummarizeConcurrentMark) {
  2429     concurrent_mark()->print_summary_info();
  2431   if (G1SummarizeZFStats) {
  2432     ConcurrentZFThread::print_summary_info();
  2434   g1_policy()->print_yg_surv_rate_info();
  2436   GCOverheadReporter::printGCOverhead();
  2438   SpecializationStats::print();
  2442 int G1CollectedHeap::addr_to_arena_id(void* addr) const {
  2443   HeapRegion* hr = heap_region_containing(addr);
  2444   if (hr == NULL) {
  2445     return 0;
  2446   } else {
  2447     return 1;
  2451 G1CollectedHeap* G1CollectedHeap::heap() {
  2452   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
  2453          "not a garbage-first heap");
  2454   return _g1h;
  2457 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  2458   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  2459   // Call allocation profiler
  2460   AllocationProfiler::iterate_since_last_gc();
  2461   // Fill TLAB's and such
  2462   ensure_parsability(true);
  2465 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
  2466   // FIXME: what is this about?
  2467   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  2468   // is set.
  2469   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
  2470                         "derived pointer present"));
  2473 void G1CollectedHeap::do_collection_pause() {
  2474   // Read the GC count while holding the Heap_lock
  2475   // we need to do this _before_ wait_for_cleanup_complete(), to
  2476   // ensure that we do not give up the heap lock and potentially
  2477   // pick up the wrong count
  2478   int gc_count_before = SharedHeap::heap()->total_collections();
  2480   // Don't want to do a GC pause while cleanup is being completed!
  2481   wait_for_cleanup_complete();
  2483   g1_policy()->record_stop_world_start();
  2485     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
  2486     VM_G1IncCollectionPause op(gc_count_before);
  2487     VMThread::execute(&op);
  2491 void
  2492 G1CollectedHeap::doConcurrentMark() {
  2493   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2494   if (!_cmThread->in_progress()) {
  2495     _cmThread->set_started();
  2496     CGC_lock->notify();
  2500 class VerifyMarkedObjsClosure: public ObjectClosure {
  2501     G1CollectedHeap* _g1h;
  2502     public:
  2503     VerifyMarkedObjsClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  2504     void do_object(oop obj) {
  2505       assert(obj->mark()->is_marked() ? !_g1h->is_obj_dead(obj) : true,
  2506              "markandsweep mark should agree with concurrent deadness");
  2508 };
  2510 void
  2511 G1CollectedHeap::checkConcurrentMark() {
  2512     VerifyMarkedObjsClosure verifycl(this);
  2513     //    MutexLockerEx x(getMarkBitMapLock(),
  2514     //              Mutex::_no_safepoint_check_flag);
  2515     object_iterate(&verifycl, false);
  2518 void G1CollectedHeap::do_sync_mark() {
  2519   _cm->checkpointRootsInitial();
  2520   _cm->markFromRoots();
  2521   _cm->checkpointRootsFinal(false);
  2524 // <NEW PREDICTION>
  2526 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
  2527                                                        bool young) {
  2528   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
  2531 void G1CollectedHeap::check_if_region_is_too_expensive(double
  2532                                                            predicted_time_ms) {
  2533   _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
  2536 size_t G1CollectedHeap::pending_card_num() {
  2537   size_t extra_cards = 0;
  2538   JavaThread *curr = Threads::first();
  2539   while (curr != NULL) {
  2540     DirtyCardQueue& dcq = curr->dirty_card_queue();
  2541     extra_cards += dcq.size();
  2542     curr = curr->next();
  2544   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2545   size_t buffer_size = dcqs.buffer_size();
  2546   size_t buffer_num = dcqs.completed_buffers_num();
  2547   return buffer_size * buffer_num + extra_cards;
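         // (I.e. an estimate of the pending cards: each completed buffer is
         // counted as full, plus whatever currently sits in each Java thread's
         // partially filled dirty card queue.)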
  2550 size_t G1CollectedHeap::max_pending_card_num() {
  2551   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  2552   size_t buffer_size = dcqs.buffer_size();
  2553   size_t buffer_num  = dcqs.completed_buffers_num();
  2554   int thread_num  = Threads::number_of_threads();
  2555   return (buffer_num + thread_num) * buffer_size;
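         // (Upper bound: the completed buffers plus, pessimistically, one full
         // in-flight buffer per Java thread.)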
  2558 size_t G1CollectedHeap::cards_scanned() {
  2559   HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
  2560   return g1_rset->cardsScanned();
  2563 void
  2564 G1CollectedHeap::setup_surviving_young_words() {
  2565   guarantee( _surviving_young_words == NULL, "pre-condition" );
  2566   size_t array_length = g1_policy()->young_cset_length();
  2567   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
  2568   if (_surviving_young_words == NULL) {
  2569     vm_exit_out_of_memory(sizeof(size_t) * array_length,
  2570                           "Not enough space for young surv words summary.");
  2572   memset(_surviving_young_words, 0, array_length * sizeof(size_t));
  2573 #ifdef ASSERT
  2574   for (size_t i = 0;  i < array_length; ++i) {
  2575     assert( _surviving_young_words[i] == 0, "memset above" );
  2577 #endif // ASSERT
  2580 void
  2581 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  2582   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  2583   size_t array_length = g1_policy()->young_cset_length();
  2584   for (size_t i = 0; i < array_length; ++i)
  2585     _surviving_young_words[i] += surv_young_words[i];
  2588 void
  2589 G1CollectedHeap::cleanup_surviving_young_words() {
  2590   guarantee( _surviving_young_words != NULL, "pre-condition" );
  2591   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
  2592   _surviving_young_words = NULL;
  2595 // </NEW PREDICTION>
  2597 void
  2598 G1CollectedHeap::do_collection_pause_at_safepoint() {
  2599   if (PrintHeapAtGC) {
  2600     Universe::print_heap_before_gc();
  2604     char verbose_str[128];
  2605     sprintf(verbose_str, "GC pause ");
  2606     if (g1_policy()->in_young_gc_mode()) {
  2607       if (g1_policy()->full_young_gcs())
  2608         strcat(verbose_str, "(young)");
  2609       else
  2610         strcat(verbose_str, "(partial)");
  2612     if (g1_policy()->should_initiate_conc_mark())
  2613       strcat(verbose_str, " (initial-mark)");
  2615     GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
  2617     // if PrintGCDetails is on, we'll print long statistics information
  2618     // in the collector policy code, so let's not print this as the output
  2619     // is messy if we do.
  2620     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  2621     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  2622     TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
  2624     ResourceMark rm;
  2625     assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  2626     assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
  2627     guarantee(!is_gc_active(), "collection is not reentrant");
  2628     assert(regions_accounted_for(), "Region leakage!");
  2630     increment_gc_time_stamp();
  2632     if (g1_policy()->in_young_gc_mode()) {
  2633       assert(check_young_list_well_formed(),
  2634              "young list should be well formed");
  2637     if (GC_locker::is_active()) {
  2638       return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  2641     bool abandoned = false;
  2642     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
  2643       IsGCActiveMark x;
  2645       gc_prologue(false);
  2646       increment_total_collections(false /* full gc */);
  2648 #if G1_REM_SET_LOGGING
  2649       gclog_or_tty->print_cr("\nJust chose CS, heap:");
  2650       print();
  2651 #endif
  2653       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
  2654         HandleMark hm;  // Discard invalid handles created during verification
  2655         prepare_for_verify();
  2656         gclog_or_tty->print(" VerifyBeforeGC:");
  2657         Universe::verify(false);
  2660       COMPILER2_PRESENT(DerivedPointerTable::clear());
  2662       // We want to turn off ref discovery, if necessary, and turn it back on
  2663       // again later if we do. XXX Dubious: why is discovery disabled?
  2664       bool was_enabled = ref_processor()->discovery_enabled();
  2665       if (was_enabled) ref_processor()->disable_discovery();
  2667       // Forget the current alloc region (we might even choose it to be part
  2668       // of the collection set!).
  2669       abandon_cur_alloc_region();
  2671       // The elapsed time measured from the start time below deliberately
  2672       // excludes the possible verification above.
  2673       double start_time_sec = os::elapsedTime();
  2674       GCOverheadReporter::recordSTWStart(start_time_sec);
  2675       size_t start_used_bytes = used();
  2677       g1_policy()->record_collection_pause_start(start_time_sec,
  2678                                                  start_used_bytes);
  2680       guarantee(_in_cset_fast_test == NULL, "invariant");
  2681       guarantee(_in_cset_fast_test_base == NULL, "invariant");
  2682       _in_cset_fast_test_length = max_regions();
  2683       _in_cset_fast_test_base =
  2684                              NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
  2685       memset(_in_cset_fast_test_base, false,
  2686                                      _in_cset_fast_test_length * sizeof(bool));
  2687       // We're biasing _in_cset_fast_test to avoid subtracting the
  2688       // beginning of the heap every time we want to index; basically
  2689       // it's the same as what we do with the card table.
  2690       _in_cset_fast_test = _in_cset_fast_test_base -
  2691               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
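             // (So a later membership test can presumably be as cheap as, for
             // illustration:
             //   _in_cset_fast_test[(size_t)addr >> HeapRegion::LogOfHRGrainBytes]
             // with no per-lookup subtraction of _g1_reserved.start() -- the
             // same trick the card table plays with its biased base pointer.)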
  2693 #if SCAN_ONLY_VERBOSE
  2694       _young_list->print();
  2695 #endif // SCAN_ONLY_VERBOSE
  2697       if (g1_policy()->should_initiate_conc_mark()) {
  2698         concurrent_mark()->checkpointRootsInitialPre();
  2700       save_marks();
  2702       // We must do this before any possible evacuation that should propagate
  2703       // marks.
  2704       if (mark_in_progress()) {
  2705         double start_time_sec = os::elapsedTime();
  2707         _cm->drainAllSATBBuffers();
  2708         double finish_mark_ms = (os::elapsedTime() - start_time_sec) * 1000.0;
  2709         g1_policy()->record_satb_drain_time(finish_mark_ms);
  2711       // Record the number of elements currently on the mark stack, so we
  2712       // only iterate over these.  (Since evacuation may add to the mark
  2713       // stack, doing more exposes race conditions.)  If no mark is in
  2714       // progress, this will be zero.
  2715       _cm->set_oops_do_bound();
  2717       assert(regions_accounted_for(), "Region leakage.");
  2719       if (mark_in_progress())
  2720         concurrent_mark()->newCSet();
  2722       // Now choose the CS.
  2723       g1_policy()->choose_collection_set();
  2725       // We may abandon a pause if we find no region that will fit in the MMU
  2726       // pause.
  2727       bool abandoned = (g1_policy()->collection_set() == NULL);
  2729       // Nothing to do if we were unable to choose a collection set.
  2730       if (!abandoned) {
  2731 #if G1_REM_SET_LOGGING
  2732         gclog_or_tty->print_cr("\nAfter pause, heap:");
  2733         print();
  2734 #endif
  2736         setup_surviving_young_words();
  2738         // Set up the gc allocation regions.
  2739         get_gc_alloc_regions();
  2741         // Actually do the work...
  2742         evacuate_collection_set();
  2743         free_collection_set(g1_policy()->collection_set());
  2744         g1_policy()->clear_collection_set();
  2746         FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
  2747         // this is more for peace of mind; we're nulling them here and
  2748         // we're expecting them to be null at the beginning of the next GC
  2749         _in_cset_fast_test = NULL;
  2750         _in_cset_fast_test_base = NULL;
  2752         release_gc_alloc_regions(false /* totally */);
  2754         cleanup_surviving_young_words();
  2756         if (g1_policy()->in_young_gc_mode()) {
  2757           _young_list->reset_sampled_info();
  2758           assert(check_young_list_empty(true),
  2759                  "young list should be empty");
  2761 #if SCAN_ONLY_VERBOSE
  2762           _young_list->print();
  2763 #endif // SCAN_ONLY_VERBOSE
  2765           g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  2766                                           _young_list->first_survivor_region(),
  2767                                           _young_list->last_survivor_region());
  2768           _young_list->reset_auxilary_lists();
  2770       } else {
  2771         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  2774       if (evacuation_failed()) {
  2775         _summary_bytes_used = recalculate_used();
  2776       } else {
  2777         // The "used" of the the collection set have already been subtracted
  2778         // when they were freed.  Add in the bytes evacuated.
  2779         _summary_bytes_used += g1_policy()->bytes_in_to_space();
  2782       if (g1_policy()->in_young_gc_mode() &&
  2783           g1_policy()->should_initiate_conc_mark()) {
  2784         concurrent_mark()->checkpointRootsInitialPost();
  2785         set_marking_started();
  2786         // CAUTION: after the doConcurrentMark() call below,
  2787         // the concurrent marking thread(s) could be running
  2788         // concurrently with us. Make sure that anything after
  2789         // this point does not assume that we are the only GC thread
  2790         // running. Note: of course, the actual marking work will
  2791         // not start until the safepoint itself is released in
  2792         // ConcurrentGCThread::safepoint_desynchronize().
  2793         doConcurrentMark();
  2796 #if SCAN_ONLY_VERBOSE
  2797       _young_list->print();
  2798 #endif // SCAN_ONLY_VERBOSE
  2800       double end_time_sec = os::elapsedTime();
  2801       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
  2802       g1_policy()->record_pause_time_ms(pause_time_ms);
  2803       GCOverheadReporter::recordSTWEnd(end_time_sec);
  2804       g1_policy()->record_collection_pause_end(abandoned);
  2806       assert(regions_accounted_for(), "Region leakage.");
  2808       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
  2809         HandleMark hm;  // Discard invalid handles created during verification
  2810         gclog_or_tty->print(" VerifyAfterGC:");
  2811         prepare_for_verify();
  2812         Universe::verify(false);
  2815       if (was_enabled) ref_processor()->enable_discovery();
  2818         size_t expand_bytes = g1_policy()->expansion_amount();
  2819         if (expand_bytes > 0) {
  2820           size_t bytes_before = capacity();
  2821           expand(expand_bytes);
  2825       if (mark_in_progress()) {
  2826         concurrent_mark()->update_g1_committed();
  2829 #ifdef TRACESPINNING
  2830       ParallelTaskTerminator::print_termination_counts();
  2831 #endif
  2833       gc_epilogue(false);
  2836     assert(verify_region_lists(), "Bad region lists.");
  2838     if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
  2839       gclog_or_tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
  2840       print_tracing_info();
  2841       vm_exit(-1);
  2845   if (PrintHeapAtGC) {
  2846     Universe::print_heap_after_gc();
  2848   if (G1SummarizeRSetStats &&
  2849       (G1SummarizeRSetStatsPeriod > 0) &&
  2850       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  2851     g1_rem_set()->print_summary_info();
  2855 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
  2856   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
  2857   // make sure we don't call set_gc_alloc_region() multiple times on
  2858   // the same region
  2859   assert(r == NULL || !r->is_gc_alloc_region(),
  2860          "shouldn't already be a GC alloc region");
  2861   HeapWord* original_top = NULL;
  2862   if (r != NULL)
  2863     original_top = r->top();
  2865   // We will want to record the used space in r as being there before gc.
  2866   // Once we install it as a GC alloc region it's eligible for allocation.
  2867   // So record it now and use it later.
  2868   size_t r_used = 0;
  2869   if (r != NULL) {
  2870     r_used = r->used();
  2872     if (ParallelGCThreads > 0) {
  2873       // need to take the lock to guard against two threads calling
  2874       // get_gc_alloc_region concurrently (very unlikely but...)
  2875       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  2876       r->save_marks();
  2879   HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
  2880   _gc_alloc_regions[purpose] = r;
  2881   if (old_alloc_region != NULL) {
  2882     // Replace aliases too.
  2883     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2884       if (_gc_alloc_regions[ap] == old_alloc_region) {
  2885         _gc_alloc_regions[ap] = r;
  2889   if (r != NULL) {
  2890     push_gc_alloc_region(r);
  2891     if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
  2892       // We are using a region as a GC alloc region after it has been used
  2893       // as a mutator allocation region during the current marking cycle.
  2894       // The mutator-allocated objects are currently implicitly marked, but
  2895       // when we move hr->next_top_at_mark_start() forward at the end
  2896       // of the GC pause, they won't be.  We therefore mark all objects in
  2897       // the "gap".  We do this object-by-object, since marking densely
  2898       // does not currently work right with marking bitmap iteration.  This
  2899       // means we rely on TLAB filling at the start of pauses, and no
  2900       // "resuscitation" of filled TLAB's.  If we want to do this, we need
  2901       // to fix the marking bitmap iteration.
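      // A rough sketch of the region at this point (illustrative only):
      //
      //   bottom        NTAMS (= next_top_at_mark_start)       original_top
      //   |--- subject to normal marking ---|--- "gap": mutator-allocated,
      //                                         currently implicitly live ---|
      //
      // The loop below walks the gap object by object and marks each object
      // explicitly, so they stay marked once NTAMS is moved up to top.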
  2902       HeapWord* curhw = r->next_top_at_mark_start();
  2903       HeapWord* t = original_top;
  2905       while (curhw < t) {
  2906         oop cur = (oop)curhw;
  2907         // We'll assume parallel for generality.  This is rare code.
  2908         concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
  2909         curhw = curhw + cur->size();
  2911       assert(curhw == t, "Should have parsed correctly.");
  2913     if (G1PolicyVerbose > 1) {
  2914       gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
  2915                           "for survivors:", r->bottom(), original_top, r->end());
  2916       r->print();
  2918     g1_policy()->record_before_bytes(r_used);
  2922 void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
  2923   assert(Thread::current()->is_VM_thread() ||
  2924          par_alloc_during_gc_lock()->owned_by_self(), "Precondition");
  2925   assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
  2926          "Precondition.");
  2927   hr->set_is_gc_alloc_region(true);
  2928   hr->set_next_gc_alloc_region(_gc_alloc_region_list);
  2929   _gc_alloc_region_list = hr;
  2932 #ifdef G1_DEBUG
  2933 class FindGCAllocRegion: public HeapRegionClosure {
  2934 public:
  2935   bool doHeapRegion(HeapRegion* r) {
  2936     if (r->is_gc_alloc_region()) {
  2937       gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
  2938                              r->hrs_index(), r->bottom());
  2940     return false;
  2942 };
  2943 #endif // G1_DEBUG
  2945 void G1CollectedHeap::forget_alloc_region_list() {
  2946   assert(Thread::current()->is_VM_thread(), "Precondition");
  2947   while (_gc_alloc_region_list != NULL) {
  2948     HeapRegion* r = _gc_alloc_region_list;
  2949     assert(r->is_gc_alloc_region(), "Invariant.");
  2950     // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
  2951     // newly allocated data in order to be able to apply deferred updates
   2952     // before the GC is done, for verification purposes (i.e. to allow
   2953     // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
   2954     // collection.
  2955     r->ContiguousSpace::set_saved_mark();
  2956     _gc_alloc_region_list = r->next_gc_alloc_region();
  2957     r->set_next_gc_alloc_region(NULL);
  2958     r->set_is_gc_alloc_region(false);
  2959     if (r->is_survivor()) {
  2960       if (r->is_empty()) {
  2961         r->set_not_young();
  2962       } else {
  2963         _young_list->add_survivor_region(r);
  2966     if (r->is_empty()) {
  2967       ++_free_regions;
  2970 #ifdef G1_DEBUG
  2971   FindGCAllocRegion fa;
  2972   heap_region_iterate(&fa);
  2973 #endif // G1_DEBUG
  2977 bool G1CollectedHeap::check_gc_alloc_regions() {
  2978   // TODO: allocation regions check
  2979   return true;
  2982 void G1CollectedHeap::get_gc_alloc_regions() {
   2983   // First, let's check that the GC alloc region list is empty (it should be)
  2984   assert(_gc_alloc_region_list == NULL, "invariant");
  2986   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  2987     assert(_gc_alloc_regions[ap] == NULL, "invariant");
  2988     assert(_gc_alloc_region_counts[ap] == 0, "invariant");
  2990     // Create new GC alloc regions.
  2991     HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
  2992     _retained_gc_alloc_regions[ap] = NULL;
  2994     if (alloc_region != NULL) {
  2995       assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
  2997       // let's make sure that the GC alloc region is not tagged as such
  2998       // outside a GC operation
  2999       assert(!alloc_region->is_gc_alloc_region(), "sanity");
  3001       if (alloc_region->in_collection_set() ||
  3002           alloc_region->top() == alloc_region->end() ||
  3003           alloc_region->top() == alloc_region->bottom()) {
  3004         // we will discard the current GC alloc region if it's in the
  3005         // collection set (it can happen!), if it's already full (no
  3006         // point in using it), or if it's empty (this means that it
  3007         // was emptied during a cleanup and it should be on the free
  3008         // list now).
  3010         alloc_region = NULL;
  3014     if (alloc_region == NULL) {
  3015       // we will get a new GC alloc region
  3016       alloc_region = newAllocRegionWithExpansion(ap, 0);
  3017     } else {
  3018       // the region was retained from the last collection
  3019       ++_gc_alloc_region_counts[ap];
  3022     if (alloc_region != NULL) {
  3023       assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
  3024       set_gc_alloc_region(ap, alloc_region);
  3027     assert(_gc_alloc_regions[ap] == NULL ||
  3028            _gc_alloc_regions[ap]->is_gc_alloc_region(),
  3029            "the GC alloc region should be tagged as such");
  3030     assert(_gc_alloc_regions[ap] == NULL ||
  3031            _gc_alloc_regions[ap] == _gc_alloc_region_list,
  3032            "the GC alloc region should be the same as the GC alloc list head");
  3034   // Set alternative regions for allocation purposes that have reached
  3035   // their limit.
  3036   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3037     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
  3038     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
  3039       _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
  3042   assert(check_gc_alloc_regions(), "alloc regions messed up");
  3045 void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
  3046   // We keep a separate list of all regions that have been alloc regions in
  3047   // the current collection pause. Forget that now. This method will
  3048   // untag the GC alloc regions and tear down the GC alloc region
  3049   // list. It's desirable that no regions are tagged as GC alloc
  3050   // outside GCs.
  3051   forget_alloc_region_list();
  3053   // The current alloc regions contain objs that have survived
  3054   // collection. Make them no longer GC alloc regions.
  3055   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3056     HeapRegion* r = _gc_alloc_regions[ap];
  3057     _retained_gc_alloc_regions[ap] = NULL;
  3058     _gc_alloc_region_counts[ap] = 0;
  3060     if (r != NULL) {
  3061       // we retain nothing on _gc_alloc_regions between GCs
  3062       set_gc_alloc_region(ap, NULL);
  3064       if (r->is_empty()) {
  3065         // we didn't actually allocate anything in it; let's just put
  3066         // it on the free list
  3067         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  3068         r->set_zero_fill_complete();
  3069         put_free_region_on_list_locked(r);
  3070       } else if (_retain_gc_alloc_region[ap] && !totally) {
  3071         // retain it so that we can use it at the beginning of the next GC
  3072         _retained_gc_alloc_regions[ap] = r;
  3078 #ifndef PRODUCT
  3079 // Useful for debugging
  3081 void G1CollectedHeap::print_gc_alloc_regions() {
  3082   gclog_or_tty->print_cr("GC alloc regions");
  3083   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  3084     HeapRegion* r = _gc_alloc_regions[ap];
  3085     if (r == NULL) {
  3086       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
  3087     } else {
  3088       gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
  3089                              ap, r->bottom(), r->used());
  3093 #endif // PRODUCT
  3095 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
  3096   _drain_in_progress = false;
  3097   set_evac_failure_closure(cl);
  3098   _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3101 void G1CollectedHeap::finalize_for_evac_failure() {
  3102   assert(_evac_failure_scan_stack != NULL &&
  3103          _evac_failure_scan_stack->length() == 0,
  3104          "Postcondition");
  3105   assert(!_drain_in_progress, "Postcondition");
  3106   // Don't have to delete, since the scan stack is a resource object.
  3107   _evac_failure_scan_stack = NULL;
  3112 // *** Sequential G1 Evacuation
  3114 HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose, size_t word_size) {
  3115   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  3116   // let the caller handle alloc failure
  3117   if (alloc_region == NULL) return NULL;
  3118   assert(isHumongous(word_size) || !alloc_region->isHumongous(),
  3119          "Either the object is humongous or the region isn't");
  3120   HeapWord* block = alloc_region->allocate(word_size);
  3121   if (block == NULL) {
  3122     block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
  3124   return block;
  3127 class G1IsAliveClosure: public BoolObjectClosure {
  3128   G1CollectedHeap* _g1;
  3129 public:
  3130   G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3131   void do_object(oop p) { assert(false, "Do not call."); }
  3132   bool do_object_b(oop p) {
  3133     // It is reachable if it is outside the collection set, or is inside
  3134     // and forwarded.
  3136 #ifdef G1_DEBUG
  3137     gclog_or_tty->print_cr("is alive "PTR_FORMAT" in CS %d forwarded %d overall %d",
  3138                            (void*) p, _g1->obj_in_cs(p), p->is_forwarded(),
  3139                            !_g1->obj_in_cs(p) || p->is_forwarded());
  3140 #endif // G1_DEBUG
  3142     return !_g1->obj_in_cs(p) || p->is_forwarded();
  3144 };
  3146 class G1KeepAliveClosure: public OopClosure {
  3147   G1CollectedHeap* _g1;
  3148 public:
  3149   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  3150   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  3151   void do_oop(      oop* p) {
  3152     oop obj = *p;
  3153 #ifdef G1_DEBUG
  3154     if (PrintGC && Verbose) {
  3155       gclog_or_tty->print_cr("keep alive *"PTR_FORMAT" = "PTR_FORMAT" "PTR_FORMAT,
  3156                              p, (void*) obj, (void*) *p);
  3158 #endif // G1_DEBUG
  3160     if (_g1->obj_in_cs(obj)) {
  3161       assert( obj->is_forwarded(), "invariant" );
  3162       *p = obj->forwardee();
  3163 #ifdef G1_DEBUG
  3164       gclog_or_tty->print_cr("     in CSet: moved "PTR_FORMAT" -> "PTR_FORMAT,
  3165                              (void*) obj, (void*) *p);
  3166 #endif // G1_DEBUG
  3169 };
  3171 class UpdateRSetImmediate : public OopsInHeapRegionClosure {
  3172 private:
  3173   G1CollectedHeap* _g1;
  3174   G1RemSet* _g1_rem_set;
  3175 public:
  3176   UpdateRSetImmediate(G1CollectedHeap* g1) :
  3177     _g1(g1), _g1_rem_set(g1->g1_rem_set()) {}
  3179   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3180   virtual void do_oop(      oop* p) { do_oop_work(p); }
  3181   template <class T> void do_oop_work(T* p) {
  3182     assert(_from->is_in_reserved(p), "paranoia");
  3183     T heap_oop = oopDesc::load_heap_oop(p);
  3184     if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
  3185       _g1_rem_set->par_write_ref(_from, p, 0);
  3188 };
  3190 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
  3191 private:
  3192   G1CollectedHeap* _g1;
  3193   DirtyCardQueue *_dcq;
  3194   CardTableModRefBS* _ct_bs;
  3196 public:
  3197   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
  3198     _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
  3200   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  3201   virtual void do_oop(      oop* p) { do_oop_work(p); }
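  // Rather than updating the remembered set immediately, mark the card
  // spanning p as "deferred" and enqueue it on the supplied dirty card
  // queue; the queued cards are processed after evacuation (see the
  // G1DeferredRSUpdate branch in evacuate_collection_set()).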
  3202   template <class T> void do_oop_work(T* p) {
  3203     assert(_from->is_in_reserved(p), "paranoia");
  3204     if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
  3205         !_from->is_survivor()) {
  3206       size_t card_index = _ct_bs->index_for(p);
  3207       if (_ct_bs->mark_card_deferred(card_index)) {
  3208         _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
  3212 };
  3216 class RemoveSelfPointerClosure: public ObjectClosure {
  3217 private:
  3218   G1CollectedHeap* _g1;
  3219   ConcurrentMark* _cm;
  3220   HeapRegion* _hr;
  3221   size_t _prev_marked_bytes;
  3222   size_t _next_marked_bytes;
  3223   OopsInHeapRegionClosure *_cl;
  3224 public:
  3225   RemoveSelfPointerClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* cl) :
  3226     _g1(g1), _cm(_g1->concurrent_mark()),  _prev_marked_bytes(0),
  3227     _next_marked_bytes(0), _cl(cl) {}
  3229   size_t prev_marked_bytes() { return _prev_marked_bytes; }
  3230   size_t next_marked_bytes() { return _next_marked_bytes; }
  3232   // The original idea here was to coalesce evacuated and dead objects.
  3233   // However that caused complications with the block offset table (BOT).
  3234   // In particular if there were two TLABs, one of them partially refined.
  3235   // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  3236   // The BOT entries of the unrefined part of TLAB_2 point to the start
   3237   // of TLAB_2. If the last object of TLAB_1 and the first object
   3238   // of TLAB_2 are coalesced, then the cards of the unrefined part
   3239   // would point into the middle of the filler object.
  3240   //
  3241   // The current approach is to not coalesce and leave the BOT contents intact.
  3242   void do_object(oop obj) {
  3243     if (obj->is_forwarded() && obj->forwardee() == obj) {
  3244       // The object failed to move.
  3245       assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
  3246       _cm->markPrev(obj);
  3247       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3248       _prev_marked_bytes += (obj->size() * HeapWordSize);
  3249       if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
  3250         _cm->markAndGrayObjectIfNecessary(obj);
  3252       obj->set_mark(markOopDesc::prototype());
  3253       // While we were processing RSet buffers during the
  3254       // collection, we actually didn't scan any cards on the
   3255       // collection set, since we didn't want to update remembered
   3256       // sets with entries that point into the collection set, given
   3257       // that live objects from the collection set are about to move
  3258       // and such entries will be stale very soon. This change also
  3259       // dealt with a reliability issue which involved scanning a
  3260       // card in the collection set and coming across an array that
  3261       // was being chunked and looking malformed. The problem is
  3262       // that, if evacuation fails, we might have remembered set
  3263       // entries missing given that we skipped cards on the
  3264       // collection set. So, we'll recreate such entries now.
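      // _cl is the RSet-update closure chosen in
      // remove_self_forwarding_pointers(): deferred (via a dirty card
      // queue) when G1DeferredRSUpdate is set, immediate otherwise.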
  3265       obj->oop_iterate(_cl);
  3266       assert(_cm->isPrevMarked(obj), "Should be marked!");
  3267     } else {
  3268       // The object has been either evacuated or is dead. Fill it with a
  3269       // dummy object.
  3270       MemRegion mr((HeapWord*)obj, obj->size());
  3271       CollectedHeap::fill_with_object(mr);
  3272       _cm->clearRangeBothMaps(mr);
  3275 };
  3277 void G1CollectedHeap::remove_self_forwarding_pointers() {
  3278   UpdateRSetImmediate immediate_update(_g1h);
  3279   DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
  3280   UpdateRSetDeferred deferred_update(_g1h, &dcq);
  3281   OopsInHeapRegionClosure *cl;
  3282   if (G1DeferredRSUpdate) {
  3283     cl = &deferred_update;
  3284   } else {
  3285     cl = &immediate_update;
  3287   HeapRegion* cur = g1_policy()->collection_set();
  3288   while (cur != NULL) {
  3289     assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3291     RemoveSelfPointerClosure rspc(_g1h, cl);
  3292     if (cur->evacuation_failed()) {
  3293       assert(cur->in_collection_set(), "bad CS");
  3294       cl->set_region(cur);
  3295       cur->object_iterate(&rspc);
  3297       // A number of manipulations to make the TAMS be the current top,
  3298       // and the marked bytes be the ones observed in the iteration.
  3299       if (_g1h->concurrent_mark()->at_least_one_mark_complete()) {
  3300         // The comments below are the postconditions achieved by the
  3301         // calls.  Note especially the last such condition, which says that
  3302         // the count of marked bytes has been properly restored.
  3303         cur->note_start_of_marking(false);
  3304         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3305         cur->add_to_marked_bytes(rspc.prev_marked_bytes());
  3306         // _next_marked_bytes == prev_marked_bytes.
  3307         cur->note_end_of_marking();
  3308         // _prev_top_at_mark_start == top(),
  3309         // _prev_marked_bytes == prev_marked_bytes
  3311       // If there is no mark in progress, we modified the _next variables
  3312       // above needlessly, but harmlessly.
  3313       if (_g1h->mark_in_progress()) {
  3314         cur->note_start_of_marking(false);
  3315         // _next_top_at_mark_start == top, _next_marked_bytes == 0
  3316         // _next_marked_bytes == next_marked_bytes.
  3319       // Now make sure the region has the right index in the sorted array.
  3320       g1_policy()->note_change_in_marked_bytes(cur);
  3322     cur = cur->next_in_collection_set();
  3324   assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
  3326   // Now restore saved marks, if any.
  3327   if (_objs_with_preserved_marks != NULL) {
  3328     assert(_preserved_marks_of_objs != NULL, "Both or none.");
  3329     assert(_objs_with_preserved_marks->length() ==
  3330            _preserved_marks_of_objs->length(), "Both or none.");
  3331     guarantee(_objs_with_preserved_marks->length() ==
  3332               _preserved_marks_of_objs->length(), "Both or none.");
  3333     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
  3334       oop obj   = _objs_with_preserved_marks->at(i);
  3335       markOop m = _preserved_marks_of_objs->at(i);
  3336       obj->set_mark(m);
  3338     // Delete the preserved marks growable arrays (allocated on the C heap).
  3339     delete _objs_with_preserved_marks;
  3340     delete _preserved_marks_of_objs;
  3341     _objs_with_preserved_marks = NULL;
  3342     _preserved_marks_of_objs = NULL;
  3346 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
  3347   _evac_failure_scan_stack->push(obj);
  3350 void G1CollectedHeap::drain_evac_failure_scan_stack() {
  3351   assert(_evac_failure_scan_stack != NULL, "precondition");
  3353   while (_evac_failure_scan_stack->length() > 0) {
  3354      oop obj = _evac_failure_scan_stack->pop();
  3355      _evac_failure_closure->set_region(heap_region_containing(obj));
  3356      obj->oop_iterate_backwards(_evac_failure_closure);
  3360 void G1CollectedHeap::handle_evacuation_failure(oop old) {
  3361   markOop m = old->mark();
  3362   // forward to self
  3363   assert(!old->is_forwarded(), "precondition");
  3365   old->forward_to(old);
  3366   handle_evacuation_failure_common(old, m);
  3369 oop
  3370 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  3371                                                oop old) {
  3372   markOop m = old->mark();
  3373   oop forward_ptr = old->forward_to_atomic(old);
  3374   if (forward_ptr == NULL) {
  3375     // Forward-to-self succeeded.
  3376     if (_evac_failure_closure != cl) {
  3377       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  3378       assert(!_drain_in_progress,
  3379              "Should only be true while someone holds the lock.");
  3380       // Set the global evac-failure closure to the current thread's.
  3381       assert(_evac_failure_closure == NULL, "Or locking has failed.");
  3382       set_evac_failure_closure(cl);
  3383       // Now do the common part.
  3384       handle_evacuation_failure_common(old, m);
  3385       // Reset to NULL.
  3386       set_evac_failure_closure(NULL);
  3387     } else {
  3388       // The lock is already held, and this is recursive.
  3389       assert(_drain_in_progress, "This should only be the recursive case.");
  3390       handle_evacuation_failure_common(old, m);
  3392     return old;
  3393   } else {
  3394     // Someone else had a place to copy it.
  3395     return forward_ptr;
  3399 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  3400   set_evacuation_failed(true);
  3402   preserve_mark_if_necessary(old, m);
  3404   HeapRegion* r = heap_region_containing(old);
  3405   if (!r->evacuation_failed()) {
  3406     r->set_evacuation_failed(true);
  3407     if (G1PrintRegions) {
  3408       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
  3409                           "["PTR_FORMAT","PTR_FORMAT")\n",
  3410                           r, r->bottom(), r->end());
  3414   push_on_evac_failure_scan_stack(old);
  3416   if (!_drain_in_progress) {
  3417     // prevent recursion in copy_to_survivor_space()
  3418     _drain_in_progress = true;
  3419     drain_evac_failure_scan_stack();
  3420     _drain_in_progress = false;
  3424 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  3425   if (m != markOopDesc::prototype()) {
  3426     if (_objs_with_preserved_marks == NULL) {
  3427       assert(_preserved_marks_of_objs == NULL, "Both or none.");
  3428       _objs_with_preserved_marks =
  3429         new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
  3430       _preserved_marks_of_objs =
  3431         new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
  3433     _objs_with_preserved_marks->push(obj);
  3434     _preserved_marks_of_objs->push(m);
  3438 // *** Parallel G1 Evacuation
  3440 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
  3441                                                   size_t word_size) {
  3442   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
  3443   // let the caller handle alloc failure
  3444   if (alloc_region == NULL) return NULL;
  3446   HeapWord* block = alloc_region->par_allocate(word_size);
  3447   if (block == NULL) {
  3448     MutexLockerEx x(par_alloc_during_gc_lock(),
  3449                     Mutex::_no_safepoint_check_flag);
  3450     block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
  3452   return block;
  3455 void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
  3456                                             bool par) {
  3457   // Another thread might have obtained alloc_region for the given
  3458   // purpose, and might be attempting to allocate in it, and might
  3459   // succeed.  Therefore, we can't do the "finalization" stuff on the
  3460   // region below until we're sure the last allocation has happened.
  3461   // We ensure this by allocating the remaining space with a garbage
  3462   // object.
  3463   if (par) par_allocate_remaining_space(alloc_region);
  3464   // Now we can do the post-GC stuff on the region.
  3465   alloc_region->note_end_of_copying();
  3466   g1_policy()->record_after_bytes(alloc_region->used());
  3469 HeapWord*
  3470 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
  3471                                          HeapRegion*    alloc_region,
  3472                                          bool           par,
  3473                                          size_t         word_size) {
  3474   HeapWord* block = NULL;
   3475   // In the parallel case, another thread that obtained the lock before us
   3476   // may have already installed a new gc_alloc_region.
  3477   if (alloc_region != _gc_alloc_regions[purpose]) {
  3478     assert(par, "But should only happen in parallel case.");
  3479     alloc_region = _gc_alloc_regions[purpose];
  3480     if (alloc_region == NULL) return NULL;
  3481     block = alloc_region->par_allocate(word_size);
  3482     if (block != NULL) return block;
   3483     // Otherwise, continue; the new region is already too full for this request.
  3485   assert(alloc_region != NULL, "We better have an allocation region");
  3486   retire_alloc_region(alloc_region, par);
  3488   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
  3489     // Cannot allocate more regions for the given purpose.
  3490     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
  3491     // Is there an alternative?
  3492     if (purpose != alt_purpose) {
  3493       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
   3494       // Is there an alternative region that hasn't already been aliased to ours?
  3495       if (alloc_region != alt_region && alt_region != NULL) {
  3496         // Try to allocate in the alternative region.
  3497         if (par) {
  3498           block = alt_region->par_allocate(word_size);
  3499         } else {
  3500           block = alt_region->allocate(word_size);
  3502         // Make an alias.
  3503         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
  3504         if (block != NULL) {
  3505           return block;
  3507         retire_alloc_region(alt_region, par);
   3509       // Both the allocation region and the alternative one are full
   3510       // and aliased; replace them with a new allocation region.
  3511       purpose = alt_purpose;
  3512     } else {
  3513       set_gc_alloc_region(purpose, NULL);
  3514       return NULL;
  3518   // Now allocate a new region for allocation.
  3519   alloc_region = newAllocRegionWithExpansion(purpose, word_size, false /*zero_filled*/);
  3521   // let the caller handle alloc failure
  3522   if (alloc_region != NULL) {
  3524     assert(check_gc_alloc_regions(), "alloc regions messed up");
  3525     assert(alloc_region->saved_mark_at_top(),
  3526            "Mark should have been saved already.");
  3527     // We used to assert that the region was zero-filled here, but no
  3528     // longer.
   3530     // This must be done last: once it's installed, other threads may
   3531     // allocate in it (without holding the lock).
  3532     set_gc_alloc_region(purpose, alloc_region);
  3534     if (par) {
  3535       block = alloc_region->par_allocate(word_size);
  3536     } else {
  3537       block = alloc_region->allocate(word_size);
  3539     // Caller handles alloc failure.
  3540   } else {
   3541     // This also NULLs out any other alloc purposes aliased to the same old alloc region.
  3542     set_gc_alloc_region(purpose, NULL);
  3544   return block;  // May be NULL.
  3547 void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
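  // Other workers may still be allocating in r concurrently, so free() is
  // only a snapshot: a par_allocate() of the whole remainder can fail if
  // another thread races us. In that case we re-read the free space and
  // retry until the region is effectively full or we claim the remainder.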
  3548   HeapWord* block = NULL;
  3549   size_t free_words;
  3550   do {
  3551     free_words = r->free()/HeapWordSize;
  3552     // If there's too little space, no one can allocate, so we're done.
  3553     if (free_words < (size_t)oopDesc::header_size()) return;
  3554     // Otherwise, try to claim it.
  3555     block = r->par_allocate(free_words);
  3556   } while (block == NULL);
  3557   fill_with_object(block, free_words);
  3560 #ifndef PRODUCT
  3561 bool GCLabBitMapClosure::do_bit(size_t offset) {
  3562   HeapWord* addr = _bitmap->offsetToHeapWord(offset);
  3563   guarantee(_cm->isMarked(oop(addr)), "it should be!");
  3564   return true;
  3566 #endif // PRODUCT
  3568 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
  3569   : _g1h(g1h),
  3570     _refs(g1h->task_queue(queue_num)),
  3571     _dcq(&g1h->dirty_card_queue_set()),
  3572     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
  3573     _g1_rem(g1h->g1_rem_set()),
  3574     _hash_seed(17), _queue_num(queue_num),
  3575     _term_attempts(0),
  3576     _age_table(false),
  3577 #if G1_DETAILED_STATS
  3578     _pushes(0), _pops(0), _steals(0),
  3579     _steal_attempts(0),  _overflow_pushes(0),
  3580 #endif
  3581     _strong_roots_time(0), _term_time(0),
  3582     _alloc_buffer_waste(0), _undo_waste(0)
   3584   // We allocate young_cset_length() plus one entries, since we
   3585   // "sacrifice" entry 0 to keep track of surviving bytes for
   3586   // non-young regions (where the age is -1).
   3587   // We also add a few padding elements at the beginning and at the end
   3588   // in an attempt to reduce cache contention (false sharing).
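  // Illustrative layout (a sketch, not to scale):
  //
  //   _surviving_young_words_base
  //   v
  //   [ padding ][ 0: non-young ][ 1 .. young_cset_length ][ padding ]
  //              ^
  //              _surviving_young_words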
  3589   size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
  3590   size_t array_length = PADDING_ELEM_NUM +
  3591                         real_length +
  3592                         PADDING_ELEM_NUM;
  3593   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
  3594   if (_surviving_young_words_base == NULL)
  3595     vm_exit_out_of_memory(array_length * sizeof(size_t),
  3596                           "Not enough space for young surv histo.");
  3597   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  3598   memset(_surviving_young_words, 0, real_length * sizeof(size_t));
  3600   _overflowed_refs = new OverflowQueue(10);
  3602   _start = os::elapsedTime();
  3605 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  3606   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
  3607   _par_scan_state(par_scan_state) { }
  3609 template <class T> void G1ParCopyHelper::mark_forwardee(T* p) {
  3610   // This is called _after_ do_oop_work has been called, hence after
  3611   // the object has been relocated to its new location and *p points
  3612   // to its new location.
  3614   T heap_oop = oopDesc::load_heap_oop(p);
  3615   if (!oopDesc::is_null(heap_oop)) {
  3616     oop obj = oopDesc::decode_heap_oop(heap_oop);
  3617     assert((_g1->evacuation_failed()) || (!_g1->obj_in_cs(obj)),
  3618            "shouldn't still be in the CSet if evacuation didn't fail.");
  3619     HeapWord* addr = (HeapWord*)obj;
  3620     if (_g1->is_in_g1_reserved(addr))
  3621       _cm->grayRoot(oop(addr));
  3625 oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
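  // Overview: pick a destination purpose from the object's age, allocate
  // space in the corresponding GC alloc region / buffer, then race to
  // install a forwarding pointer. The winner copies the object, updates
  // its age, preserves the "next" mark state, records surviving-young
  // bytes, and either pushes a large objArray for chunked scanning or
  // scans the copy's fields directly; on losing the race (or on allocation
  // failure) the allocation is undone / the evacuation-failure path runs.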
  3626   size_t    word_sz = old->size();
  3627   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
  3628   // +1 to make the -1 indexes valid...
  3629   int       young_index = from_region->young_index_in_cset()+1;
  3630   assert( (from_region->is_young() && young_index > 0) ||
  3631           (!from_region->is_young() && young_index == 0), "invariant" );
  3632   G1CollectorPolicy* g1p = _g1->g1_policy();
  3633   markOop m = old->mark();
  3634   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
  3635                                            : m->age();
  3636   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
  3637                                                              word_sz);
  3638   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
  3639   oop       obj     = oop(obj_ptr);
  3641   if (obj_ptr == NULL) {
  3642     // This will either forward-to-self, or detect that someone else has
  3643     // installed a forwarding pointer.
  3644     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  3645     return _g1->handle_evacuation_failure_par(cl, old);
  3648   // We're going to allocate linearly, so might as well prefetch ahead.
  3649   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
  3651   oop forward_ptr = old->forward_to_atomic(obj);
  3652   if (forward_ptr == NULL) {
  3653     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
  3654     if (g1p->track_object_age(alloc_purpose)) {
  3655       // We could simply do obj->incr_age(). However, this causes a
  3656       // performance issue. obj->incr_age() will first check whether
  3657       // the object has a displaced mark by checking its mark word;
  3658       // getting the mark word from the new location of the object
  3659       // stalls. So, given that we already have the mark word and we
  3660       // are about to install it anyway, it's better to increase the
  3661       // age on the mark word, when the object does not have a
  3662       // displaced mark word. We're not expecting many objects to have
   3663       // a displaced mark word, so that case is not optimized
  3664       // further (it could be...) and we simply call obj->incr_age().
  3666       if (m->has_displaced_mark_helper()) {
  3667         // in this case, we have to install the mark word first,
  3668         // otherwise obj looks to be forwarded (the old mark word,
  3669         // which contains the forward pointer, was copied)
  3670         obj->set_mark(m);
  3671         obj->incr_age();
  3672       } else {
  3673         m = m->incr_age();
  3674         obj->set_mark(m);
  3676       _par_scan_state->age_table()->add(obj, word_sz);
  3677     } else {
  3678       obj->set_mark(m);
  3681     // preserve "next" mark bit
  3682     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
  3683       if (!use_local_bitmaps ||
  3684           !_par_scan_state->alloc_buffer(alloc_purpose)->mark(obj_ptr)) {
  3685         // if we couldn't mark it on the local bitmap (this happens when
  3686         // the object was not allocated in the GCLab), we have to bite
  3687         // the bullet and do the standard parallel mark
  3688         _cm->markAndGrayObjectIfNecessary(obj);
  3690 #if 1
  3691       if (_g1->isMarkedNext(old)) {
  3692         _cm->nextMarkBitMap()->parClear((HeapWord*)old);
  3694 #endif
  3697     size_t* surv_young_words = _par_scan_state->surviving_young_words();
  3698     surv_young_words[young_index] += word_sz;
  3700     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
  3701       arrayOop(old)->set_length(0);
  3702       oop* old_p = set_partial_array_mask(old);
  3703       _par_scan_state->push_on_queue(old_p);
  3704     } else {
  3705       // No point in using the slower heap_region_containing() method,
  3706       // given that we know obj is in the heap.
  3707       _scanner->set_region(_g1->heap_region_containing_raw(obj));
  3708       obj->oop_iterate_backwards(_scanner);
  3710   } else {
  3711     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
  3712     obj = forward_ptr;
  3714   return obj;
  3717 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee, bool skip_cset_test>
  3718 template <class T>
  3719 void G1ParCopyClosure <do_gen_barrier, barrier, do_mark_forwardee, skip_cset_test>
  3720 ::do_oop_work(T* p) {
  3721   oop obj = oopDesc::load_decode_heap_oop(p);
  3722   assert(barrier != G1BarrierRS || obj != NULL,
  3723          "Precondition: G1BarrierRS implies obj is nonNull");
  3725   // The only time we skip the cset test is when we're scanning
  3726   // references popped from the queue. And we only push on the queue
  3727   // references that we know point into the cset, so no point in
  3728   // checking again. But we'll leave an assert here for peace of mind.
  3729   assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
  3731   // here the null check is implicit in the cset_fast_test() test
  3732   if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
  3733 #if G1_REM_SET_LOGGING
  3734     gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
  3735                            "into CS.", p, (void*) obj);
  3736 #endif
  3737     if (obj->is_forwarded()) {
  3738       oopDesc::encode_store_heap_oop(p, obj->forwardee());
  3739     } else {
  3740       oop copy_oop = copy_to_survivor_space(obj);
  3741       oopDesc::encode_store_heap_oop(p, copy_oop);
  3743     // When scanning the RS, we only care about objs in CS.
  3744     if (barrier == G1BarrierRS) {
  3745       _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  3749   // When scanning moved objs, must look at all oops.
  3750   if (barrier == G1BarrierEvac && obj != NULL) {
  3751     _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
  3754   if (do_gen_barrier && obj != NULL) {
  3755     par_do_barrier(p);
  3759 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
  3760 template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(narrowOop* p);
  3762 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
  3763   assert(has_partial_array_mask(p), "invariant");
  3764   oop old = clear_partial_array_mask(p);
  3765   assert(old->is_objArray(), "must be obj array");
  3766   assert(old->is_forwarded(), "must be forwarded");
  3767   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  3769   objArrayOop obj = objArrayOop(old->forwardee());
  3770   assert((void*)old != (void*)old->forwardee(), "self forwarding here?");
  3771   // Process ParGCArrayScanChunk elements now
   3772   // and push the remainder back onto the queue.
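  // For example (illustrative numbers): with ParGCArrayScanChunk == 50, an
  // array of 230 elements whose old copy currently has length == 100 (100
  // elements already claimed) leaves a remainder of 130 > 2*50, so we scan
  // elements [100, 150), bump the old copy's length to 150, and push a
  // masked pointer to the old copy back on the queue for the remaining 80.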
  3773   int start     = arrayOop(old)->length();
  3774   int end       = obj->length();
  3775   int remainder = end - start;
  3776   assert(start <= end, "just checking");
  3777   if (remainder > 2 * ParGCArrayScanChunk) {
  3778     // Test above combines last partial chunk with a full chunk
  3779     end = start + ParGCArrayScanChunk;
  3780     arrayOop(old)->set_length(end);
  3781     // Push remainder.
  3782     oop* old_p = set_partial_array_mask(old);
  3783     assert(arrayOop(old)->length() < obj->length(), "Empty push?");
  3784     _par_scan_state->push_on_queue(old_p);
  3785   } else {
  3786     // Restore length so that the heap remains parsable in
  3787     // case of evacuation failure.
  3788     arrayOop(old)->set_length(end);
  3790   _scanner.set_region(_g1->heap_region_containing_raw(obj));
  3791   // process our set of indices (include header in first chunk)
  3792   obj->oop_iterate_range(&_scanner, start, end);
  3795 class G1ParEvacuateFollowersClosure : public VoidClosure {
  3796 protected:
  3797   G1CollectedHeap*              _g1h;
  3798   G1ParScanThreadState*         _par_scan_state;
  3799   RefToScanQueueSet*            _queues;
  3800   ParallelTaskTerminator*       _terminator;
  3802   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
  3803   RefToScanQueueSet*      queues()         { return _queues; }
  3804   ParallelTaskTerminator* terminator()     { return _terminator; }
  3806 public:
  3807   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
  3808                                 G1ParScanThreadState* par_scan_state,
  3809                                 RefToScanQueueSet* queues,
  3810                                 ParallelTaskTerminator* terminator)
  3811     : _g1h(g1h), _par_scan_state(par_scan_state),
  3812       _queues(queues), _terminator(terminator) {}
  3814   void do_void() {
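    // Work-stealing loop: drain the local queue, then try to steal from
    // another worker's queue; only when both come up empty do we offer
    // termination. offer_termination() returns true once every worker has
    // offered, which ends the evacuation phase for this thread.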
  3815     G1ParScanThreadState* pss = par_scan_state();
  3816     while (true) {
  3817       pss->trim_queue();
  3818       IF_G1_DETAILED_STATS(pss->note_steal_attempt());
  3820       StarTask stolen_task;
  3821       if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
  3822         IF_G1_DETAILED_STATS(pss->note_steal());
  3824         // slightly paranoid tests; I'm trying to catch potential
  3825         // problems before we go into push_on_queue to know where the
  3826         // problem is coming from
  3827         assert((oop*)stolen_task != NULL, "Error");
  3828         if (stolen_task.is_narrow()) {
  3829           assert(UseCompressedOops, "Error");
  3830           narrowOop* p = (narrowOop*) stolen_task;
  3831           assert(has_partial_array_mask(p) ||
  3832                  _g1h->obj_in_cs(oopDesc::load_decode_heap_oop(p)), "Error");
  3833           pss->push_on_queue(p);
  3834         } else {
  3835           oop* p = (oop*) stolen_task;
  3836           assert(has_partial_array_mask(p) || _g1h->obj_in_cs(*p), "Error");
  3837           pss->push_on_queue(p);
  3839         continue;
  3841       pss->start_term_time();
  3842       if (terminator()->offer_termination()) break;
  3843       pss->end_term_time();
  3845     pss->end_term_time();
  3846     pss->retire_alloc_buffers();
  3848 };
  3850 class G1ParTask : public AbstractGangTask {
  3851 protected:
  3852   G1CollectedHeap*       _g1h;
  3853   RefToScanQueueSet      *_queues;
  3854   ParallelTaskTerminator _terminator;
  3855   int _n_workers;
  3857   Mutex _stats_lock;
  3858   Mutex* stats_lock() { return &_stats_lock; }
  3860   size_t getNCards() {
  3861     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
  3862       / G1BlockOffsetSharedArray::N_bytes;
  3865 public:
  3866   G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
  3867     : AbstractGangTask("G1 collection"),
  3868       _g1h(g1h),
  3869       _queues(task_queues),
  3870       _terminator(workers, _queues),
  3871       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
  3872       _n_workers(workers)
  3873   {}
  3875   RefToScanQueueSet* queues() { return _queues; }
  3877   RefToScanQueue *work_queue(int i) {
  3878     return queues()->queue(i);
  3881   void work(int i) {
  3882     if (i >= _n_workers) return;  // no work needed this round
  3883     ResourceMark rm;
  3884     HandleMark   hm;
  3886     G1ParScanThreadState            pss(_g1h, i);
  3887     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
  3888     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
  3889     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
  3891     pss.set_evac_closure(&scan_evac_cl);
  3892     pss.set_evac_failure_closure(&evac_failure_cl);
  3893     pss.set_partial_scan_closure(&partial_scan_cl);
  3895     G1ParScanExtRootClosure         only_scan_root_cl(_g1h, &pss);
  3896     G1ParScanPermClosure            only_scan_perm_cl(_g1h, &pss);
  3897     G1ParScanHeapRSClosure          only_scan_heap_rs_cl(_g1h, &pss);
  3899     G1ParScanAndMarkExtRootClosure  scan_mark_root_cl(_g1h, &pss);
  3900     G1ParScanAndMarkPermClosure     scan_mark_perm_cl(_g1h, &pss);
  3901     G1ParScanAndMarkHeapRSClosure   scan_mark_heap_rs_cl(_g1h, &pss);
  3903     OopsInHeapRegionClosure        *scan_root_cl;
  3904     OopsInHeapRegionClosure        *scan_perm_cl;
  3905     OopsInHeapRegionClosure        *scan_so_cl;
  3907     if (_g1h->g1_policy()->should_initiate_conc_mark()) {
  3908       scan_root_cl = &scan_mark_root_cl;
  3909       scan_perm_cl = &scan_mark_perm_cl;
  3910       scan_so_cl   = &scan_mark_heap_rs_cl;
  3911     } else {
  3912       scan_root_cl = &only_scan_root_cl;
  3913       scan_perm_cl = &only_scan_perm_cl;
  3914       scan_so_cl   = &only_scan_heap_rs_cl;
  3917     pss.start_strong_roots();
  3918     _g1h->g1_process_strong_roots(/* not collecting perm */ false,
  3919                                   SharedHeap::SO_AllClasses,
  3920                                   scan_root_cl,
  3921                                   &only_scan_heap_rs_cl,
  3922                                   scan_so_cl,
  3923                                   scan_perm_cl,
  3924                                   i);
  3925     pss.end_strong_roots();
  3927       double start = os::elapsedTime();
  3928       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  3929       evac.do_void();
  3930       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
  3931       double term_ms = pss.term_time()*1000.0;
  3932       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
  3933       _g1h->g1_policy()->record_termination_time(i, term_ms);
  3935     if (G1UseSurvivorSpaces) {
  3936       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
  3938     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
  3940     // Clean up any par-expanded rem sets.
  3941     HeapRegionRemSet::par_cleanup();
  3943     MutexLocker x(stats_lock());
  3944     if (ParallelGCVerbose) {
  3945       gclog_or_tty->print("Thread %d complete:\n", i);
  3946 #if G1_DETAILED_STATS
  3947       gclog_or_tty->print("  Pushes: %7d    Pops: %7d   Overflows: %7d   Steals %7d (in %d attempts)\n",
  3948                           pss.pushes(),
  3949                           pss.pops(),
  3950                           pss.overflow_pushes(),
  3951                           pss.steals(),
  3952                           pss.steal_attempts());
  3953 #endif
  3954       double elapsed      = pss.elapsed();
  3955       double strong_roots = pss.strong_roots_time();
  3956       double term         = pss.term_time();
  3957       gclog_or_tty->print("  Elapsed: %7.2f ms.\n"
  3958                           "    Strong roots: %7.2f ms (%6.2f%%)\n"
  3959                           "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
  3960                           elapsed * 1000.0,
  3961                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
  3962                           term * 1000.0, (term*100.0/elapsed),
  3963                           pss.term_attempts());
  3964       size_t total_waste = pss.alloc_buffer_waste() + pss.undo_waste();
  3965       gclog_or_tty->print("  Waste: %8dK\n"
  3966                  "    Alloc Buffer: %8dK\n"
  3967                  "    Undo: %8dK\n",
  3968                  (total_waste * HeapWordSize) / K,
  3969                  (pss.alloc_buffer_waste() * HeapWordSize) / K,
  3970                  (pss.undo_waste() * HeapWordSize) / K);
  3973     assert(pss.refs_to_scan() == 0, "Task queue should be empty");
  3974     assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
  3976 };
  3978 // *** Common G1 Evacuation Stuff
  3980 void
  3981 G1CollectedHeap::
  3982 g1_process_strong_roots(bool collecting_perm_gen,
  3983                         SharedHeap::ScanningOption so,
  3984                         OopClosure* scan_non_heap_roots,
  3985                         OopsInHeapRegionClosure* scan_rs,
  3986                         OopsInHeapRegionClosure* scan_so,
  3987                         OopsInGenClosure* scan_perm,
  3988                         int worker_i) {
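  // Roughly: (1) process external strong roots and the perm gen through
  // buffering closures, (2) scan oops on the concurrent-mark stack,
  // (3) scan the scan-only young regions, (4) scan remembered sets that
  // point into the collection set, (5) process reference-processor roots.
  // Per-phase times are recorded with g1_policy() along the way.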
  3989   // First scan the strong roots, including the perm gen.
  3990   double ext_roots_start = os::elapsedTime();
  3991   double closure_app_time_sec = 0.0;
  3993   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  3994   BufferingOopsInGenClosure buf_scan_perm(scan_perm);
  3995   buf_scan_perm.set_generation(perm_gen());
  3997   process_strong_roots(collecting_perm_gen, so,
  3998                        &buf_scan_non_heap_roots,
  3999                        &buf_scan_perm);
  4000   // Finish up any enqueued closure apps.
  4001   buf_scan_non_heap_roots.done();
  4002   buf_scan_perm.done();
  4003   double ext_roots_end = os::elapsedTime();
  4004   g1_policy()->reset_obj_copy_time(worker_i);
  4005   double obj_copy_time_sec =
  4006     buf_scan_non_heap_roots.closure_app_seconds() +
  4007     buf_scan_perm.closure_app_seconds();
  4008   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
  4009   double ext_root_time_ms =
  4010     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
  4011   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
  4013   // Scan strong roots in mark stack.
  4014   if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
  4015     concurrent_mark()->oops_do(scan_non_heap_roots);
  4017   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
  4018   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
  4020   // XXX What should this be doing in the parallel case?
  4021   g1_policy()->record_collection_pause_end_CH_strong_roots();
  4022   if (scan_so != NULL) {
  4023     scan_scan_only_set(scan_so, worker_i);
  4025   // Now scan the complement of the collection set.
  4026   if (scan_rs != NULL) {
  4027     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
  4029   // Finish with the ref_processor roots.
  4030   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  4031     ref_processor()->oops_do(scan_non_heap_roots);
  4033   g1_policy()->record_collection_pause_end_G1_strong_roots();
  4034   _process_strong_tasks->all_tasks_completed();
  4037 void
  4038 G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
  4039                                        OopsInHeapRegionClosure* oc,
  4040                                        int worker_i) {
  4041   HeapWord* startAddr = r->bottom();
  4042   HeapWord* endAddr = r->used_region().end();
  4044   oc->set_region(r);
  4046   HeapWord* p = r->bottom();
  4047   HeapWord* t = r->top();
  4048   guarantee( p == r->next_top_at_mark_start(), "invariant" );
  4049   while (p < t) {
  4050     oop obj = oop(p);
  4051     p += obj->oop_iterate(oc);
  4055 void
  4056 G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
  4057                                     int worker_i) {
  4058   double start = os::elapsedTime();
  4060   BufferingOopsInHeapRegionClosure boc(oc);
  4062   FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
  4063   FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
  4065   OopsInHeapRegionClosure *foc;
  4066   if (g1_policy()->should_initiate_conc_mark())
  4067     foc = &scan_and_mark;
  4068   else
  4069     foc = &scan_only;
  4071   HeapRegion* hr;
  4072   int n = 0;
  4073   while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
  4074     scan_scan_only_region(hr, foc, worker_i);
  4075     ++n;
  4077   boc.done();
  4079   double closure_app_s = boc.closure_app_seconds();
  4080   g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
  4081   double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
  4082   g1_policy()->record_scan_only_time(worker_i, ms, n);
  4085 void
  4086 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
  4087                                        OopClosure* non_root_closure) {
  4088   SharedHeap::process_weak_roots(root_closure, non_root_closure);
  4092 class SaveMarksClosure: public HeapRegionClosure {
  4093 public:
  4094   bool doHeapRegion(HeapRegion* r) {
  4095     r->save_marks();
  4096     return false;
  4098 };
  4100 void G1CollectedHeap::save_marks() {
  4101   if (ParallelGCThreads == 0) {
  4102     SaveMarksClosure sm;
  4103     heap_region_iterate(&sm);
  4105   // We do this even in the parallel case
  4106   perm_gen()->save_marks();
  4109 void G1CollectedHeap::evacuate_collection_set() {
  4110   set_evacuation_failed(false);
  4112   g1_rem_set()->prepare_for_oops_into_collection_set_do();
  4113   concurrent_g1_refine()->set_use_cache(false);
  4114   concurrent_g1_refine()->clear_hot_cache_claimed_index();
  4116   int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
  4117   set_par_threads(n_workers);
  4118   G1ParTask g1_par_task(this, n_workers, _task_queues);
  4120   init_for_evac_failure(NULL);
  4122   change_strong_roots_parity();  // In preparation for parallel strong roots.
  4123   rem_set()->prepare_for_younger_refs_iterate(true);
  4125   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  4126   double start_par = os::elapsedTime();
  4127   if (ParallelGCThreads > 0) {
  4128     // The individual threads will set their evac-failure closures.
  4129     workers()->run_task(&g1_par_task);
  4130   } else {
  4131     g1_par_task.work(0);
  4134   double par_time = (os::elapsedTime() - start_par) * 1000.0;
  4135   g1_policy()->record_par_time(par_time);
  4136   set_par_threads(0);
  4137   // Is this the right thing to do here?  We don't save marks
  4138   // on individual heap regions when we allocate from
  4139   // them in parallel, so this seems like the correct place for this.
  4140   retire_all_alloc_regions();
  4142     G1IsAliveClosure is_alive(this);
  4143     G1KeepAliveClosure keep_alive(this);
  4144     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  4146   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  4148   concurrent_g1_refine()->clear_hot_cache();
  4149   concurrent_g1_refine()->set_use_cache(true);
  4151   finalize_for_evac_failure();
  4153   // Must do this before removing self-forwarding pointers, which clears
  4154   // the per-region evac-failure flags.
  4155   concurrent_mark()->complete_marking_in_collection_set();
  4157   if (evacuation_failed()) {
  4158     remove_self_forwarding_pointers();
  4159     if (PrintGCDetails) {
  4160       gclog_or_tty->print(" (evacuation failed)");
  4161     } else if (PrintGC) {
  4162       gclog_or_tty->print("--");
  4166   if (G1DeferredRSUpdate) {
  4167     RedirtyLoggedCardTableEntryFastClosure redirty;
  4168     dirty_card_queue_set().set_closure(&redirty);
  4169     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  4170     JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
  4171     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  4174   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  4177 void G1CollectedHeap::free_region(HeapRegion* hr) {
  4178   size_t pre_used = 0;
  4179   size_t cleared_h_regions = 0;
  4180   size_t freed_regions = 0;
  4181   UncleanRegionList local_list;
  4183   HeapWord* start = hr->bottom();
  4184   HeapWord* end   = hr->prev_top_at_mark_start();
  4185   size_t used_bytes = hr->used();
  4186   size_t live_bytes = hr->max_live_bytes();
  4187   if (used_bytes > 0) {
  4188     guarantee( live_bytes <= used_bytes, "invariant" );
  4189   } else {
  4190     guarantee( live_bytes == 0, "invariant" );
  4193   size_t garbage_bytes = used_bytes - live_bytes;
  4194   if (garbage_bytes > 0)
  4195     g1_policy()->decrease_known_garbage_bytes(garbage_bytes);
  4197   free_region_work(hr, pre_used, cleared_h_regions, freed_regions,
  4198                    &local_list);
  4199   finish_free_region_work(pre_used, cleared_h_regions, freed_regions,
  4200                           &local_list);
  4203 void
  4204 G1CollectedHeap::free_region_work(HeapRegion* hr,
  4205                                   size_t& pre_used,
  4206                                   size_t& cleared_h_regions,
  4207                                   size_t& freed_regions,
  4208                                   UncleanRegionList* list,
  4209                                   bool par) {
  4210   pre_used += hr->used();
  4211   if (hr->isHumongous()) {
  4212     assert(hr->startsHumongous(),
  4213            "Only the start of a humongous region should be freed.");
  4214     int ind = _hrs->find(hr);
  4215     assert(ind != -1, "Should have an index.");
  4216     // Clear the start region.
  4217     hr->hr_clear(par, true /*clear_space*/);
  4218     list->insert_before_head(hr);
  4219     cleared_h_regions++;
  4220     freed_regions++;
  4221     // Clear any continued regions.
  4222     ind++;
  4223     while ((size_t)ind < n_regions()) {
  4224       HeapRegion* hrc = _hrs->at(ind);
  4225       if (!hrc->continuesHumongous()) break;
   4226       // Otherwise, it continues the humongous (H) region.
  4227       assert(hrc->humongous_start_region() == hr, "Huh?");
  4228       hrc->hr_clear(par, true /*clear_space*/);
  4229       cleared_h_regions++;
  4230       freed_regions++;
  4231       list->insert_before_head(hrc);
  4232       ind++;
  4234   } else {
  4235     hr->hr_clear(par, true /*clear_space*/);
  4236     list->insert_before_head(hr);
  4237     freed_regions++;
  4238     // If we're using clear2, this should not be enabled.
  4239     // assert(!hr->in_cohort(), "Can't be both free and in a cohort.");
  4243 void G1CollectedHeap::finish_free_region_work(size_t pre_used,
  4244                                               size_t cleared_h_regions,
  4245                                               size_t freed_regions,
  4246                                               UncleanRegionList* list) {
  4247   if (list != NULL && list->sz() > 0) {
  4248     prepend_region_list_on_unclean_list(list);
  4250   // Acquire a lock, if we're parallel, to update possibly-shared
  4251   // variables.
  4252   Mutex* lock = (n_par_threads() > 0) ? ParGCRareEvent_lock : NULL;
  4254     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  4255     _summary_bytes_used -= pre_used;
  4256     _num_humongous_regions -= (int) cleared_h_regions;
  4257     _free_regions += freed_regions;
  4262 void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
  4263   while (list != NULL) {
  4264     guarantee( list->is_young(), "invariant" );
  4266     HeapWord* bottom = list->bottom();
  4267     HeapWord* end = list->end();
  4268     MemRegion mr(bottom, end);
  4269     ct_bs->dirty(mr);
  4271     list = list->get_next_young_region();
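       // G1ParCleanupCTTask: parallel card-table cleanup.  Each worker pops
       // regions off the heap's dirty-cards-region list and clears their card
       // ranges; survivor and scan-only regions are skipped here because their
       // cards are re-dirtied wholesale afterwards.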
  4276 class G1ParCleanupCTTask : public AbstractGangTask {
  4277   CardTableModRefBS* _ct_bs;
  4278   G1CollectedHeap* _g1h;
  4279 public:
  4280   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
  4281                      G1CollectedHeap* g1h) :
  4282     AbstractGangTask("G1 Par Cleanup CT Task"),
  4283     _ct_bs(ct_bs),
  4284     _g1h(g1h)
  4285   { }
  4287   void work(int i) {
  4288     HeapRegion* r;
  4289     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
  4290       clear_cards(r);
  4293   void clear_cards(HeapRegion* r) {
  4294     // Cards for Survivor and Scan-Only regions will be dirtied later.
  4295     if (!r->is_scan_only() && !r->is_survivor()) {
  4296       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
  4299 };
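       // cleanUpCardTable() clears the cards of the regions dirtied during the
       // pause (in parallel when ParallelGCThreads > 0), re-dirties the young
       // (scan-only / survivor) regions, and reports the elapsed time to the
       // policy.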
  4302 void G1CollectedHeap::cleanUpCardTable() {
  4303   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
  4304   double start = os::elapsedTime();
  4306   // Iterate over the dirty cards region list.
  4307   G1ParCleanupCTTask cleanup_task(ct_bs, this);
  4308   if (ParallelGCThreads > 0) {
  4309     set_par_threads(workers()->total_workers());
  4310     workers()->run_task(&cleanup_task);
  4311     set_par_threads(0);
  4312   } else {
  4313     while (_dirty_cards_region_list) {
  4314       HeapRegion* r = _dirty_cards_region_list;
  4315       cleanup_task.clear_cards(r);
  4316       _dirty_cards_region_list = r->get_next_dirty_cards_region();
  4317       if (_dirty_cards_region_list == r) {
  4318         // The last region.
  4319         _dirty_cards_region_list = NULL;
  4321       r->set_next_dirty_cards_region(NULL);
  4324   // now, redirty the cards of the scan-only and survivor regions
  4325   // (it seemed faster to do it this way, instead of iterating over
  4326   // all regions and then clearing / dirtying as appropriate)
  4327   dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
  4328   dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
  4330   double elapsed = os::elapsedTime() - start;
  4331   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
  4335 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
  4336   if (g1_policy()->should_do_collection_pause(word_size)) {
  4337     do_collection_pause();
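       // free_collection_set() walks the collection-set list: it times the
       // non-young and young portions separately, records surviving words for
       // young regions, frees regions that were evacuated successfully, and
       // resets (but does not free) regions whose evacuation failed.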
  4341 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  4342   double young_time_ms     = 0.0;
  4343   double non_young_time_ms = 0.0;
  4345   G1CollectorPolicy* policy = g1_policy();
  4347   double start_sec = os::elapsedTime();
  4348   bool non_young = true;
  4350   HeapRegion* cur = cs_head;
  4351   int age_bound = -1;
  4352   size_t rs_lengths = 0;
  4354   while (cur != NULL) {
  4355     if (non_young) {
  4356       if (cur->is_young()) {
  4357         double end_sec = os::elapsedTime();
  4358         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4359         non_young_time_ms += elapsed_ms;
  4361         start_sec = os::elapsedTime();
  4362         non_young = false;
  4364     } else {
  4365       if (!cur->is_on_free_list()) {
  4366         double end_sec = os::elapsedTime();
  4367         double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4368         young_time_ms += elapsed_ms;
  4370         start_sec = os::elapsedTime();
  4371         non_young = true;
  4375     rs_lengths += cur->rem_set()->occupied();
  4377     HeapRegion* next = cur->next_in_collection_set();
  4378     assert(cur->in_collection_set(), "bad CS");
  4379     cur->set_next_in_collection_set(NULL);
  4380     cur->set_in_collection_set(false);
  4382     if (cur->is_young()) {
  4383       int index = cur->young_index_in_cset();
  4384       guarantee( index != -1, "invariant" );
  4385       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
  4386       size_t words_survived = _surviving_young_words[index];
  4387       cur->record_surv_words_in_group(words_survived);
  4388     } else {
  4389       int index = cur->young_index_in_cset();
  4390       guarantee( index == -1, "invariant" );
  4393     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
  4394             (!cur->is_young() && cur->young_index_in_cset() == -1),
  4395             "invariant" );
  4397     if (!cur->evacuation_failed()) {
  4398       // Evacuation succeeded, so the region now holds only garbage; it
  4398       // should still appear non-empty here.
  4399       assert(!cur->is_empty(),
  4400              "Should not have empty regions in a CS.");
  4401       free_region(cur);
  4402     } else {
  4403       guarantee( !cur->is_scan_only(), "should not be scan only" );
  4404       cur->uninstall_surv_rate_group();
  4405       if (cur->is_young())
  4406         cur->set_young_index_in_cset(-1);
  4407       cur->set_not_young();
  4408       cur->set_evacuation_failed(false);
  4410     cur = next;
  4413   policy->record_max_rs_lengths(rs_lengths);
  4414   policy->cset_regions_freed();
  4416   double end_sec = os::elapsedTime();
  4417   double elapsed_ms = (end_sec - start_sec) * 1000.0;
  4418   if (non_young)
  4419     non_young_time_ms += elapsed_ms;
  4420   else
  4421     young_time_ms += elapsed_ms;
  4423   policy->record_young_free_cset_time_ms(young_time_ms);
  4424   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
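       // The functions below manage the free and unclean region lists that are
       // shared with the concurrent zero-fill (ZF) thread.  Variants suffixed
       // _locked assume the caller already holds the relevant monitor (ZF_mon,
       // or Cleanup_mon for the cleanup-completion handshake); the unlocked
       // wrappers acquire it themselves.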
  4427 HeapRegion*
  4428 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
  4429   assert(ZF_mon->owned_by_self(), "Precondition");
  4430   HeapRegion* res = pop_unclean_region_list_locked();
  4431   if (res != NULL) {
  4432     assert(!res->continuesHumongous() &&
  4433            res->zero_fill_state() != HeapRegion::Allocated,
  4434            "Only free regions on unclean list.");
  4435     if (zero_filled) {
  4436       res->ensure_zero_filled_locked();
  4437       res->set_zero_fill_allocated();
  4440   return res;
  4443 HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list(bool zero_filled) {
  4444   MutexLockerEx zx(ZF_mon, Mutex::_no_safepoint_check_flag);
  4445   return alloc_region_from_unclean_list_locked(zero_filled);
  4448 void G1CollectedHeap::put_region_on_unclean_list(HeapRegion* r) {
  4449   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4450   put_region_on_unclean_list_locked(r);
  4451   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  4454 void G1CollectedHeap::set_unclean_regions_coming(bool b) {
  4455   MutexLockerEx x(Cleanup_mon);
  4456   set_unclean_regions_coming_locked(b);
  4459 void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
  4460   assert(Cleanup_mon->owned_by_self(), "Precondition");
  4461   _unclean_regions_coming = b;
  4462   // Wake up mutator threads that might be waiting for completeCleanup to
  4463   // finish.
  4464   if (!b) Cleanup_mon->notify_all();
  4467 void G1CollectedHeap::wait_for_cleanup_complete() {
  4468   MutexLockerEx x(Cleanup_mon);
  4469   wait_for_cleanup_complete_locked();
  4472 void G1CollectedHeap::wait_for_cleanup_complete_locked() {
  4473   assert(Cleanup_mon->owned_by_self(), "precondition");
  4474   while (_unclean_regions_coming) {
  4475     Cleanup_mon->wait();
  4479 void
  4480 G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
  4481   assert(ZF_mon->owned_by_self(), "precondition.");
  4482   _unclean_region_list.insert_before_head(r);
  4485 void
  4486 G1CollectedHeap::prepend_region_list_on_unclean_list(UncleanRegionList* list) {
  4487   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4488   prepend_region_list_on_unclean_list_locked(list);
  4489   if (should_zf()) ZF_mon->notify_all(); // Wake up ZF thread.
  4492 void
  4493 G1CollectedHeap::
  4494 prepend_region_list_on_unclean_list_locked(UncleanRegionList* list) {
  4495   assert(ZF_mon->owned_by_self(), "precondition.");
  4496   _unclean_region_list.prepend_list(list);
  4499 HeapRegion* G1CollectedHeap::pop_unclean_region_list_locked() {
  4500   assert(ZF_mon->owned_by_self(), "precondition.");
  4501   HeapRegion* res = _unclean_region_list.pop();
  4502   if (res != NULL) {
  4503     // Inform ZF thread that there's a new unclean head.
  4504     if (_unclean_region_list.hd() != NULL && should_zf())
  4505       ZF_mon->notify_all();
  4507   return res;
  4510 HeapRegion* G1CollectedHeap::peek_unclean_region_list_locked() {
  4511   assert(ZF_mon->owned_by_self(), "precondition.");
  4512   return _unclean_region_list.hd();
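       // move_cleaned_region_to_free_list_locked() promotes the head of the
       // unclean list to the free list once it has been zero-filled, returning
       // true if a region was moved.  A caller wanting to drain everything the
       // ZF thread has finished might loop (illustrative sketch only):
       //
       //   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
       //   while (move_cleaned_region_to_free_list_locked()) { /* keep going */ }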
  4516 bool G1CollectedHeap::move_cleaned_region_to_free_list_locked() {
  4517   assert(ZF_mon->owned_by_self(), "Precondition");
  4518   HeapRegion* r = peek_unclean_region_list_locked();
  4519   if (r != NULL && r->zero_fill_state() == HeapRegion::ZeroFilled) {
  4520     // Result of below must be equal to "r", since we hold the lock.
  4521     (void)pop_unclean_region_list_locked();
  4522     put_free_region_on_list_locked(r);
  4523     return true;
  4524   } else {
  4525     return false;
  4529 bool G1CollectedHeap::move_cleaned_region_to_free_list() {
  4530   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4531   return move_cleaned_region_to_free_list_locked();
  4535 void G1CollectedHeap::put_free_region_on_list_locked(HeapRegion* r) {
  4536   assert(ZF_mon->owned_by_self(), "precondition.");
  4537   assert(_free_region_list_size == free_region_list_length(), "Inv");
  4538   assert(r->zero_fill_state() == HeapRegion::ZeroFilled,
  4539         "Regions on free list must be zero filled");
  4540   assert(!r->isHumongous(), "Must not be humongous.");
  4541   assert(r->is_empty(), "Better be empty");
  4542   assert(!r->is_on_free_list(),
  4543          "Better not already be on free list");
  4544   assert(!r->is_on_unclean_list(),
  4545          "Better not already be on unclean list");
  4546   r->set_on_free_list(true);
  4547   r->set_next_on_free_list(_free_region_list);
  4548   _free_region_list = r;
  4549   _free_region_list_size++;
  4550   assert(_free_region_list_size == free_region_list_length(), "Inv");
  4553 void G1CollectedHeap::put_free_region_on_list(HeapRegion* r) {
  4554   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4555   put_free_region_on_list_locked(r);
  4558 HeapRegion* G1CollectedHeap::pop_free_region_list_locked() {
  4559   assert(ZF_mon->owned_by_self(), "precondition.");
  4560   assert(_free_region_list_size == free_region_list_length(), "Inv");
  4561   HeapRegion* res = _free_region_list;
  4562   if (res != NULL) {
  4563     _free_region_list = res->next_from_free_list();
  4564     _free_region_list_size--;
  4565     res->set_on_free_list(false);
  4566     res->set_next_on_free_list(NULL);
  4567     assert(_free_region_list_size == free_region_list_length(), "Inv");
  4569   return res;
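       // alloc_free_region_from_lists() alternates between the two sources:
       // when a zero-filled region is requested it tries the free list first
       // and then the unclean list (zero-filling on demand); otherwise it tries
       // the unclean list first and the free list on the second pass.  Returns
       // NULL once both sources are exhausted.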
  4573 HeapRegion* G1CollectedHeap::alloc_free_region_from_lists(bool zero_filled) {
  4574   // By self, or on behalf of self.
  4575   assert(Heap_lock->is_locked(), "Precondition");
  4576   HeapRegion* res = NULL;
  4577   bool first = true;
  4578   while (res == NULL) {
  4579     if (zero_filled || !first) {
  4580       MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4581       res = pop_free_region_list_locked();
  4582       if (res != NULL) {
  4583         assert(!res->zero_fill_is_allocated(),
  4584                "No allocated regions on free list.");
  4585         res->set_zero_fill_allocated();
  4586       } else if (!first) {
  4587         break;  // We tried both, time to return NULL.
  4591     if (res == NULL) {
  4592       res = alloc_region_from_unclean_list(zero_filled);
  4594     assert(res == NULL ||
  4595            !zero_filled ||
  4596            res->zero_fill_is_allocated(),
  4597            "We must have allocated the region we're returning");
  4598     first = false;
  4600   return res;
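       // remove_allocated_regions_from_lists() unlinks any region whose
       // zero-fill state is Allocated from both the unclean list and the free
       // list, clearing its on-list flags and keeping the cached list sizes
       // consistent.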
  4603 void G1CollectedHeap::remove_allocated_regions_from_lists() {
  4604   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4606     HeapRegion* prev = NULL;
  4607     HeapRegion* cur = _unclean_region_list.hd();
  4608     while (cur != NULL) {
  4609       HeapRegion* next = cur->next_from_unclean_list();
  4610       if (cur->zero_fill_is_allocated()) {
  4611         // Remove from the list.
  4612         if (prev == NULL) {
  4613           (void)_unclean_region_list.pop();
  4614         } else {
  4615           _unclean_region_list.delete_after(prev);
  4617         cur->set_on_unclean_list(false);
  4618         cur->set_next_on_unclean_list(NULL);
  4619       } else {
  4620         prev = cur;
  4622       cur = next;
  4624     assert(_unclean_region_list.sz() == unclean_region_list_length(),
  4625            "Inv");
  4629     HeapRegion* prev = NULL;
  4630     HeapRegion* cur = _free_region_list;
  4631     while (cur != NULL) {
  4632       HeapRegion* next = cur->next_from_free_list();
  4633       if (cur->zero_fill_is_allocated()) {
  4634         // Remove from the list.
  4635         if (prev == NULL) {
  4636           _free_region_list = cur->next_from_free_list();
  4637         } else {
  4638           prev->set_next_on_free_list(cur->next_from_free_list());
  4640         cur->set_on_free_list(false);
  4641         cur->set_next_on_free_list(NULL);
  4642         _free_region_list_size--;
  4643       } else {
  4644         prev = cur;
  4646       cur = next;
  4648     assert(_free_region_list_size == free_region_list_length(), "Inv");
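       // verify_region_lists() / verify_region_lists_locked() are debugging
       // checks: every region on a list must carry the matching on-list flag
       // and a legal zero-fill state, and the cached list sizes must agree
       // with a full recount.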
  4652 bool G1CollectedHeap::verify_region_lists() {
  4653   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4654   return verify_region_lists_locked();
  4657 bool G1CollectedHeap::verify_region_lists_locked() {
  4658   HeapRegion* unclean = _unclean_region_list.hd();
  4659   while (unclean != NULL) {
  4660     guarantee(unclean->is_on_unclean_list(), "Well, it is!");
  4661     guarantee(!unclean->is_on_free_list(), "Well, it shouldn't be!");
  4662     guarantee(unclean->zero_fill_state() != HeapRegion::Allocated,
  4663               "Everything else is possible.");
  4664     unclean = unclean->next_from_unclean_list();
  4666   guarantee(_unclean_region_list.sz() == unclean_region_list_length(), "Inv");
  4668   HeapRegion* free_r = _free_region_list;
  4669   while (free_r != NULL) {
  4670     assert(free_r->is_on_free_list(), "Well, it is!");
  4671     assert(!free_r->is_on_unclean_list(), "Well, it shouldn't be!");
  4672     switch (free_r->zero_fill_state()) {
  4673     case HeapRegion::NotZeroFilled:
  4674     case HeapRegion::ZeroFilling:
  4675       guarantee(false, "Should not be on free list.");
  4676       break;
  4677     default:
  4678       // Everything else is possible.
  4679       break;
  4681     free_r = free_r->next_from_free_list();
  4683   guarantee(_free_region_list_size == free_region_list_length(), "Inv");
  4684   // If we didn't do an assertion...
  4685   return true;
  4688 size_t G1CollectedHeap::free_region_list_length() {
  4689   assert(ZF_mon->owned_by_self(), "precondition.");
  4690   size_t len = 0;
  4691   HeapRegion* cur = _free_region_list;
  4692   while (cur != NULL) {
  4693     len++;
  4694     cur = cur->next_from_free_list();
  4696   return len;
  4699 size_t G1CollectedHeap::unclean_region_list_length() {
  4700   assert(ZF_mon->owned_by_self(), "precondition.");
  4701   return _unclean_region_list.length();
  4704 size_t G1CollectedHeap::n_regions() {
  4705   return _hrs->length();
  4708 size_t G1CollectedHeap::max_regions() {
  4709   return
  4710     (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
  4711     HeapRegion::GrainBytes;
  4714 size_t G1CollectedHeap::free_regions() {
  4715   /* Possibly-expensive assert.
  4716   assert(_free_regions == count_free_regions(),
  4717          "_free_regions is off.");
  4718   */
  4719   return _free_regions;
  4722 bool G1CollectedHeap::should_zf() {
  4723   return _free_region_list_size < (size_t) G1ConcZFMaxRegions;
  4726 class RegionCounter: public HeapRegionClosure {
  4727   size_t _n;
  4728 public:
  4729   RegionCounter() : _n(0) {}
  4730   bool doHeapRegion(HeapRegion* r) {
  4731     if (r->is_empty()) {
  4732       assert(!r->isHumongous(), "H regions should not be empty.");
  4733       _n++;
  4735     return false;
  4737   int res() { return (int) _n; }
  4738 };
  4740 size_t G1CollectedHeap::count_free_regions() {
  4741   RegionCounter rc;
  4742   heap_region_iterate(&rc);
  4743   size_t n = rc.res();
  4744   if (_cur_alloc_region != NULL && _cur_alloc_region->is_empty())
  4745     n--;
  4746   return n;
  4749 size_t G1CollectedHeap::count_free_regions_list() {
  4750   size_t n = 0;
  4751   size_t o = 0;
  4752   ZF_mon->lock_without_safepoint_check();
  4753   HeapRegion* cur = _free_region_list;
  4754   while (cur != NULL) {
  4755     cur = cur->next_from_free_list();
  4756     n++;
  4758   size_t m = unclean_region_list_length();
  4759   ZF_mon->unlock();
  4760   return n + m;
  4763 bool G1CollectedHeap::should_set_young_locked() {
  4764   assert(heap_lock_held_for_gc(),
  4765               "the heap lock should already be held by or for this thread");
  4766   return  (g1_policy()->in_young_gc_mode() &&
  4767            g1_policy()->should_add_next_region_to_young_list());
  4770 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  4771   assert(heap_lock_held_for_gc(),
  4772               "the heap lock should already be held by or for this thread");
  4773   _young_list->push_region(hr);
  4774   g1_policy()->set_region_short_lived(hr);
  4777 class NoYoungRegionsClosure: public HeapRegionClosure {
  4778 private:
  4779   bool _success;
  4780 public:
  4781   NoYoungRegionsClosure() : _success(true) { }
  4782   bool doHeapRegion(HeapRegion* r) {
  4783     if (r->is_young()) {
  4784       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
  4785                              r->bottom(), r->end());
  4786       _success = false;
  4788     return false;
  4790   bool success() { return _success; }
  4791 };
  4793 bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
  4794                                              bool check_sample) {
  4795   bool ret = true;
  4797   ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
  4798   if (!ignore_scan_only_list) {
  4799     NoYoungRegionsClosure closure;
  4800     heap_region_iterate(&closure);
  4801     ret = ret && closure.success();
  4804   return ret;
  4807 void G1CollectedHeap::empty_young_list() {
  4808   assert(heap_lock_held_for_gc(),
  4809               "the heap lock should already be held by or for this thread");
  4810   assert(g1_policy()->in_young_gc_mode(), "should be in young GC mode");
  4812   _young_list->empty_list();
  4815 bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
  4816   bool no_allocs = true;
  4817   for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
  4818     HeapRegion* r = _gc_alloc_regions[ap];
  4819     no_allocs = r == NULL || r->saved_mark_at_top();
  4821   return no_allocs;
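       // retire_all_alloc_regions() retires each GC alloc region exactly once;
       // the inner loop skips slots that alias a region already processed for
       // an earlier allocation purpose.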
  4824 void G1CollectedHeap::retire_all_alloc_regions() {
  4825   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
  4826     HeapRegion* r = _gc_alloc_regions[ap];
  4827     if (r != NULL) {
  4828       // Check for aliases.
  4829       bool has_processed_alias = false;
  4830       for (int i = 0; i < ap; ++i) {
  4831         if (_gc_alloc_regions[i] == r) {
  4832           has_processed_alias = true;
  4833           break;
  4836       if (!has_processed_alias) {
  4837         retire_alloc_region(r, false /* par */);
  4844 // Done at the start of full GC.
  4845 void G1CollectedHeap::tear_down_region_lists() {
  4846   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4847   while (pop_unclean_region_list_locked() != NULL) ;
  4848   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
  4849          "Postconditions of loop.")
  4850   while (pop_free_region_list_locked() != NULL) ;
  4851   assert(_free_region_list == NULL, "Postcondition of loop.");
  4852   if (_free_region_list_size != 0) {
  4853     gclog_or_tty->print_cr("Size is "SIZE_FORMAT".", _free_region_list_size);
  4854     print_on(gclog_or_tty, true /* extended */);
  4856   assert(_free_region_list_size == 0, "Postconditions of loop.");
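       // RegionResetter rebuilds the region lists after a full GC: a used
       // region has the tail of its space between top() and end() filled so it
       // can be treated as zero-fill allocated, while an empty region is
       // counted as free and re-sorted onto the unclean or free list according
       // to its zero-fill state.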
  4860 class RegionResetter: public HeapRegionClosure {
  4861   G1CollectedHeap* _g1;
  4862   int _n;
  4863 public:
  4864   RegionResetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  4865   bool doHeapRegion(HeapRegion* r) {
  4866     if (r->continuesHumongous()) return false;
  4867     if (r->top() > r->bottom()) {
  4868       if (r->top() < r->end()) {
  4869         Copy::fill_to_words(r->top(),
  4870                           pointer_delta(r->end(), r->top()));
  4872       r->set_zero_fill_allocated();
  4873     } else {
  4874       assert(r->is_empty(), "tautology");
  4875       _n++;
  4876       switch (r->zero_fill_state()) {
  4877         case HeapRegion::NotZeroFilled:
  4878         case HeapRegion::ZeroFilling:
  4879           _g1->put_region_on_unclean_list_locked(r);
  4880           break;
  4881         case HeapRegion::Allocated:
  4882           r->set_zero_fill_complete();
  4883           // no break; go on to put on free list.
  4884         case HeapRegion::ZeroFilled:
  4885           _g1->put_free_region_on_list_locked(r);
  4886           break;
  4889     return false;
  4892   int getFreeRegionCount() {return _n;}
  4893 };
  4895 // Done at the end of full GC.
  4896 void G1CollectedHeap::rebuild_region_lists() {
  4897   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4898   // This needs to go at the end of the full GC.
  4899   RegionResetter rs;
  4900   heap_region_iterate(&rs);
  4901   _free_regions = rs.getFreeRegionCount();
  4902   // Tell the ZF thread it may have work to do.
  4903   if (should_zf()) ZF_mon->notify_all();
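       // UsedRegionsNeedZeroFillSetter marks every used region as needing zero
       // fill at the start of a full GC.  In debug builds it temporarily sets
       // top() back to bottom() so that the assertions inside
       // set_zero_fill_needed() hold.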
  4906 class UsedRegionsNeedZeroFillSetter: public HeapRegionClosure {
  4907   G1CollectedHeap* _g1;
  4908   int _n;
  4909 public:
  4910   UsedRegionsNeedZeroFillSetter() : _g1(G1CollectedHeap::heap()), _n(0) {}
  4911   bool doHeapRegion(HeapRegion* r) {
  4912     if (r->continuesHumongous()) return false;
  4913     if (r->top() > r->bottom()) {
  4914       // There are assertions in "set_zero_fill_needed()" below that
  4915       // require top() == bottom(), so this is technically illegal.
  4916       // We'll skirt the law here, by making that true temporarily.
  4917       DEBUG_ONLY(HeapWord* save_top = r->top();
  4918                  r->set_top(r->bottom()));
  4919       r->set_zero_fill_needed();
  4920       DEBUG_ONLY(r->set_top(save_top));
  4922     return false;
  4924 };
  4926 // Done at the start of full GC.
  4927 void G1CollectedHeap::set_used_regions_to_need_zero_fill() {
  4928   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
  4929   // This runs at the start of the full GC, before any compaction.
  4930   UsedRegionsNeedZeroFillSetter rs;
  4931   heap_region_iterate(&rs);
  4934 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  4935   _refine_cte_cl->set_concurrent(concurrent);
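       // The closures and helpers below, up to the matching #endif, are
       // debug-only region printing and accounting support compiled out of
       // PRODUCT builds.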
  4938 #ifndef PRODUCT
  4940 class PrintHeapRegionClosure: public HeapRegionClosure {
  4941 public:
  4942   bool doHeapRegion(HeapRegion *r) {
  4943     gclog_or_tty->print("Region: "PTR_FORMAT":", r);
  4944     if (r != NULL) {
  4945       if (r->is_on_free_list())
  4946         gclog_or_tty->print("Free ");
  4947       if (r->is_young())
  4948         gclog_or_tty->print("Young ");
  4949       if (r->isHumongous())
  4950         gclog_or_tty->print("Is Humongous ");
  4951       r->print();
  4953     return false;
  4955 };
  4957 class SortHeapRegionClosure : public HeapRegionClosure {
  4958   size_t young_regions, free_regions, unclean_regions;
  4959   size_t hum_regions, count;
  4960   size_t unaccounted, cur_unclean, cur_alloc;
  4961   size_t total_free;
  4962   HeapRegion* cur;
  4963 public:
  4964   SortHeapRegionClosure(HeapRegion *_cur) : young_regions(0),
  4965     free_regions(0), unclean_regions(0),
  4966     hum_regions(0),
  4967     count(0), unaccounted(0), cur_unclean(0),
  4968     cur_alloc(0), total_free(0), cur(_cur)
  4969   {}
  4970   bool doHeapRegion(HeapRegion *r) {
  4971     count++;
  4972     if (r->is_on_free_list()) free_regions++;
  4973     else if (r->is_on_unclean_list()) unclean_regions++;
  4974     else if (r->isHumongous())  hum_regions++;
  4975     else if (r->is_young()) young_regions++;
  4976     else if (r == cur) cur_alloc++;
  4977     else unaccounted++;
  4978     return false;
  4980   void print() {
  4981     total_free = free_regions + unclean_regions;
  4982     gclog_or_tty->print("%d regions\n", count);
  4983     gclog_or_tty->print("%d free: free_list = %d unclean = %d\n",
  4984                         total_free, free_regions, unclean_regions);
  4985     gclog_or_tty->print("%d humongous %d young\n",
  4986                         hum_regions, young_regions);
  4987     gclog_or_tty->print("%d cur_alloc\n", cur_alloc);
  4988     gclog_or_tty->print("UHOH unaccounted = %d\n", unaccounted);
  4990 };
  4992 void G1CollectedHeap::print_region_counts() {
  4993   SortHeapRegionClosure sc(_cur_alloc_region);
  4994   PrintHeapRegionClosure cl;
  4995   heap_region_iterate(&cl);
  4996   heap_region_iterate(&sc);
  4997   sc.print();
  4998   print_region_accounting_info();
  4999 };
  5001 bool G1CollectedHeap::regions_accounted_for() {
  5002   // TODO: regions accounting for young/survivor/tenured
  5003   return true;
  5006 bool G1CollectedHeap::print_region_accounting_info() {
  5007   gclog_or_tty->print_cr("Free regions: %d (count: %d count list %d) (clean: %d unclean: %d).",
  5008                          free_regions(),
  5009                          count_free_regions(), count_free_regions_list(),
  5010                          _free_region_list_size, _unclean_region_list.sz());
  5011   gclog_or_tty->print_cr("cur_alloc: %d.",
  5012                          (_cur_alloc_region == NULL ? 0 : 1));
  5013   gclog_or_tty->print_cr("H regions: %d.", _num_humongous_regions);
  5015   // TODO: check regions accounting for young/survivor/tenured
  5016   return true;
  5019 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  5020   HeapRegion* hr = heap_region_containing(p);
  5021   if (hr == NULL) {
  5022     return is_in_permanent(p);
  5023   } else {
  5024     return hr->is_in(p);
  5027 #endif // PRODUCT
  5029 void G1CollectedHeap::g1_unimplemented() {
  5030   // Unimplemented();
