src/share/vm/gc_implementation/g1/concurrentMark.cpp

author:    jcoomes
date:      Wed, 10 Sep 2014 13:01:13 -0700
changeset: 7159:e5668dcf12e9
parent:    7100:edb5f3b38aab
child:     7195:c02ec279b062

8057818: collect allocation context statistics at gc pauses
Reviewed-by: mikael, jmasa

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "memory/allocation.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"

// Concurrent marking bit map wrapper

CMBitMapRO::CMBitMapRO(int shifter) :
  _bm(),
  _shifter(shifter) {
  _bmStartWord = 0;
  _bmWordSize = 0;
}

HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
                                               const HeapWord* limit) const {
  // First we must round addr *up* to a possible object boundary.
  addr = (HeapWord*)align_size_up((intptr_t)addr,
                                  HeapWordSize << _shifter);
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_one postcondition");
  assert(nextAddr == limit || isMarked(nextAddr),
         "get_next_one postcondition");
  return nextAddr;
}

HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
                                                 const HeapWord* limit) const {
  size_t addrOffset = heapWordToOffset(addr);
  if (limit == NULL) {
    limit = _bmStartWord + _bmWordSize;
  }
  size_t limitOffset = heapWordToOffset(limit);
  size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= addr, "get_next_zero postcondition");
  assert(nextAddr == limit || !isMarked(nextAddr),
         "get_next_zero postcondition");
  return nextAddr;
}

int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return (int) (diff >> _shifter);
}

#ifndef PRODUCT
bool CMBitMapRO::covers(MemRegion heap_rs) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
         "size inconsistency");
  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
         _bmWordSize  == heap_rs.word_size();
}
#endif

void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
  _bm.print_on_error(st, prefix);
}

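// The marking bitmap has one bit per MinObjAlignment-sized chunk of heap,
// so its size in bytes is heap_size / (MinObjAlignmentInBytes * BitsPerByte):
// 1/64th of the heap size with the default 8-byte object alignment.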
size_t CMBitMap::compute_size(size_t heap_size) {
  return heap_size / mark_distance();
}

size_t CMBitMap::mark_distance() {
  return MinObjAlignmentInBytes * BitsPerByte;
}

void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
  _bmStartWord = heap.start();
  _bmWordSize = heap.word_size();

  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
  _bm.set_size(_bmWordSize >> _shifter);

  storage->set_mapping_changed_listener(&_listener);
}

void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
  // We need to clear the bitmap on commit, removing any existing information.
  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
  _bm->clearRange(mr);
}

// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
 private:
  ConcurrentMark* _cm;
  CMBitMap* _bitmap;
  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
 public:
  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    size_t const chunk_size_in_words = M / HeapWordSize;

    HeapWord* cur = r->bottom();
    HeapWord* const end = r->end();

    while (cur < end) {
      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
      _bitmap->clearRange(mr);

      cur += chunk_size_in_words;

      // Abort iteration if after yielding the marking has been aborted.
      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
        return true;
      }
      // Repeat the asserts from before the start of the closure. We will do them
      // as asserts here to minimize their overhead on the product. However, we
      // will have them as guarantees at the beginning / end of the bitmap
      // clearing to get some checking in the product.
      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
    }

    return false;
  }
};

void CMBitMap::clearAll() {
  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  guarantee(cl.complete(), "Must have completed iteration.");
  return;
}

void CMBitMap::markRange(MemRegion mr) {
  // Clip the range to the heap covered by this bitmap.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
          ((HeapWord *) mr.end())),
         "markRange memory region end is not card aligned");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), true);
}

void CMBitMap::clearRange(MemRegion mr) {
  // Clip the range to the heap covered by this bitmap.
  mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  // convert address range into offset range
  _bm.at_put_range(heapWordToOffset(mr.start()),
                   heapWordToOffset(mr.end()), false);
}

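// Returns the first contiguous run of marked words in [addr, end_addr) and
// clears its bits, so repeated calls walk the marked parts of the range
// exactly once.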
MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
                                            HeapWord* end_addr) {
  HeapWord* start = getNextMarkedWordAddress(addr);
  start = MIN2(start, end_addr);
  HeapWord* end   = getNextUnmarkedWordAddress(start);
  end = MIN2(end, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clearRange(mr);
  }
  return mr;
}

CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
  _base(NULL), _cm(cm)
#ifdef ASSERT
  , _drain_in_progress(false)
  , _drain_in_progress_yields(false)
#endif
{}

bool CMMarkStack::allocate(size_t capacity) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("ConcurrentMark MarkStack allocation failure");
    return false;
  }
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("ConcurrentMark MarkStack backing store failure");
    // Release the virtual memory reserved for the marking stack
    rs.release();
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "Didn't reserve backing store for all of ConcurrentMark stack?");
  _base = (oop*) _virtual_space.low();
  setEmpty();
  _capacity = (jint) capacity;
  _saved_index = -1;
  _should_expand = false;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}

void CMMarkStack::expand() {
  // Called during remark if we've overflowed the marking stack while marking.
  assert(isEmpty(), "stack should have been emptied while handling overflow");
  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  // Clear expansion flag
  _should_expand = false;
  if (_capacity == (jint) MarkStackSizeMax) {
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }
    return;
  }
  // Double capacity if possible
  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  // Do not give up the existing stack until we have managed to
  // get the doubled capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
                                                           sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with the old stack
    _virtual_space.release();
    // Reinitialize virtual space for the new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack capacity");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else {
    if (PrintGCDetails && Verbose) {
      // Failed to double capacity; continue with the current stack.
      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
                          _capacity / K, new_capacity / K);
    }
  }
}

void CMMarkStack::set_should_expand() {
  // If we're resetting the marking state because of a
  // marking stack overflow, record that we should, if
  // possible, expand the stack.
  _should_expand = _cm->has_overflown();
}

CMMarkStack::~CMMarkStack() {
  if (_base != NULL) {
    _base = NULL;
    _virtual_space.release();
  }
}

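// Lock-free push: claim the next slot by CAS-ing _index forward, then store
// the entry into the claimed slot. On CAS failure we simply retry; if the
// stack is full we record the fact in _overflow and drop the entry, relying
// on the overflow handling protocol to restart marking later.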
void CMMarkStack::par_push(oop ptr) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index+1;
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      _base[index] = ptr;
      // Note that we don't maintain this atomically.  We could, but it
      // doesn't seem necessary.
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

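// Bulk variant of par_push(): claims n consecutive slots with a single CAS
// and copies the whole array into them.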
void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
  while (true) {
    if (isFull()) {
      _overflow = true;
      return;
    }
    // Otherwise...
    jint index = _index;
    jint next_index = index + n;
    if (next_index > _capacity) {
      _overflow = true;
      return;
    }
    jint res = Atomic::cmpxchg(next_index, &_index, index);
    if (res == index) {
      for (int i = 0; i < n; i++) {
        int  ind = index + i;
        assert(ind < _capacity, "By overflow test above.");
        _base[ind] = ptr_arr[i];
      }
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
      return;
    }
    // Otherwise, we need to try again.
  }
}

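// The bulk transfers below are rare events, so rather than using the CAS
// protocol above they are simply serialized with ParGCRareEvent_lock.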
void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint start = _index;
  jint next_index = start + n;
  if (next_index > _capacity) {
    _overflow = true;
    return;
  }
  // Otherwise.
  _index = next_index;
  for (int i = 0; i < n; i++) {
    int ind = start + i;
    assert(ind < _capacity, "By overflow test above.");
    _base[ind] = ptr_arr[i];
  }
  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
}

bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  jint index = _index;
  if (index == 0) {
    *n = 0;
    return false;
  } else {
    int k = MIN2(max, index);
    jint  new_ind = index - k;
    for (int j = 0; j < k; j++) {
      ptr_arr[j] = _base[new_ind + j];
    }
    _index = new_ind;
    *n = k;
    return true;
  }
}

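// Pops entries off the stack and iterates over each popped object with cl
// until the stack is empty or, if yield_after is true, until a yield request
// is observed; returns false if draining was cut short by a yield.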
template<class OopClosureClass>
bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
  assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
         || SafepointSynchronize::is_at_safepoint(),
         "Drain recursion must be yield-safe.");
  bool res = true;
  debug_only(_drain_in_progress = true);
  debug_only(_drain_in_progress_yields = yield_after);
  while (!isEmpty()) {
    oop newOop = pop();
    assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
    assert(newOop->is_oop(), "Expected an oop");
    assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
           "only grey objects on this stack");
    newOop->oop_iterate(cl);
    if (yield_after && _cm->do_yield_check()) {
      res = false;
      break;
    }
  }
  debug_only(_drain_in_progress = false);
  return res;
}

void CMMarkStack::note_start_of_gc() {
  assert(_saved_index == -1,
         "note_start_of_gc()/end_of_gc() bracketed incorrectly");
  _saved_index = _index;
}

void CMMarkStack::note_end_of_gc() {
  // This is intentionally a guarantee, instead of an assert. If we
  // accidentally add something to the mark stack during GC, it
  // will be a correctness issue so it's better if we crash. We'll
  // only check this once per GC anyway, so it won't be a performance
  // issue in any way.
  guarantee(_saved_index == _index,
            err_msg("saved index: %d index: %d", _saved_index, _index));
  _saved_index = -1;
}

void CMMarkStack::oops_do(OopClosure* f) {
  assert(_saved_index == _index,
         err_msg("saved index: %d index: %d", _saved_index, _index));
  for (int i = 0; i < _index; i += 1) {
    f->do_oop(&_base[i]);
  }
}

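// Root regions are the regions (currently only survivor regions) whose
// objects must be scanned, and everything reachable from them marked,
// before the next evacuation pause is allowed to start.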
CMRootRegions::CMRootRegions() :
  _young_list(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false),  _next_survivor(NULL) { }

void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
  _young_list = g1h->young_list();
  _cm = cm;
}

void CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  assert(_next_survivor == NULL, "pre-condition");
  _next_survivor = _young_list->first_survivor_region();
  _scan_in_progress = (_next_survivor != NULL);
  _should_abort = false;
}

HeapRegion* CMRootRegions::claim_next() {
  if (_should_abort) {
    // If someone has set the should_abort flag, we return NULL to
    // force the caller to bail out of their loop.
    return NULL;
  }

  // Currently, only survivors can be root regions.
  HeapRegion* res = _next_survivor;
  if (res != NULL) {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    // Read it again in case it changed while we were waiting for the lock.
    res = _next_survivor;
    if (res != NULL) {
      if (res == _young_list->last_survivor_region()) {
        // We just claimed the last survivor so store NULL to indicate
        // that we're done.
        _next_survivor = NULL;
      } else {
        _next_survivor = res->get_next_young_region();
      }
    } else {
      // Someone else claimed the last survivor while we were trying
      // to take the lock so nothing else to do.
    }
  }
  assert(res == NULL || res->is_survivor(), "post-condition");

  return res;
}

void CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_next_survivor == NULL, "we should have claimed all survivors");
  }
  _next_survivor = NULL;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    _scan_in_progress = false;
    RootRegionScan_lock->notify_all();
  }
}

bool CMRootRegions::wait_until_scan_finished() {
  if (!scan_in_progress()) return false;

  {
    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
    while (scan_in_progress()) {
      RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  return true;
}

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

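// Returns the number of concurrent marking threads to use for the given
// number of parallel GC threads: roughly one marking thread for every four
// parallel threads, with a minimum of one.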
uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
  return MAX2((n_par_threads + 2) / 4, 1U);
}

ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
  _g1h(g1h),
  _markBitMap1(),
  _markBitMap2(),
  _parallel_marking_threads(0),
  _max_parallel_marking_threads(0),
  _sleep_factor(0.0),
  _marking_task_overhead(1.0),
  _cleanup_sleep_factor(0.0),
  _cleanup_task_overhead(1.0),
  _cleanup_list("Cleanup List"),
  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),

  _prevMarkBitMap(&_markBitMap1),
  _nextMarkBitMap(&_markBitMap2),

  _markStack(this),
  // _finger set in set_non_marking_state

  _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
  // _active_tasks set in set_non_marking_state
  // _tasks set inside the constructor
  _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
  _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _aborted_gc_id(GCId::undefined()),
  _restart_for_overflow(false),
  _concurrent_marking_in_progress(false),

  // _verbose_level set below

  _init_times(),
  _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
  _cleanup_times(),
  _total_counting_time(0.0),
  _total_rs_scrub_time(0.0),

  _parallel_workers(NULL),

  _count_card_bitmaps(NULL),
  _count_marked_bytes(NULL),
  _completed_initialization(false) {
  CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  if (verbose_level < no_verbose) {
    verbose_level = no_verbose;
  }
  if (verbose_level > high_verbose) {
    verbose_level = high_verbose;
  }
  _verbose_level = verbose_level;

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                           "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
  }

  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);

  // Create & start a ConcurrentMark thread.
  _cmThread = new ConcurrentMarkThread(this);
  assert(cmThread() != NULL, "CM Thread should have been created");
  assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  if (_cmThread->osthread() == NULL) {
      vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
  }

  assert(CGC_lock != NULL, "Where's the CGC_lock?");
  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");

  SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  satb_qs.set_buffer_size(G1SATBBufferSize);

  _root_regions.init(_g1h, this);

  if (ConcGCThreads > ParallelGCThreads) {
    warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
            "than ParallelGCThreads (" UINTX_FORMAT ").",
            ConcGCThreads, ParallelGCThreads);
    return;
  }
  if (ParallelGCThreads == 0) {
    // if we are not running with any parallel GC threads we will not
    // spawn any marking threads either
    _parallel_marking_threads =       0;
    _max_parallel_marking_threads =   0;
    _sleep_factor             =     0.0;
    _marking_task_overhead    =     1.0;
  } else {
    if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
      // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
      // if both are set
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    } else if (G1MarkingOverheadPercent > 0) {
      // We will calculate the number of parallel marking threads based
      // on a target overhead with respect to the soft real-time goal
      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
      double overall_cm_overhead =
        (double) MaxGCPauseMillis * marking_overhead /
        (double) GCPauseIntervalMillis;
      double cpu_ratio = 1.0 / (double) os::processor_count();
      double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
      double marking_task_overhead =
        overall_cm_overhead / marking_thread_num *
                                                (double) os::processor_count();
      double sleep_factor =
                         (1.0 - marking_task_overhead) / marking_task_overhead;
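      // A task that works for T seconds and then sleeps for
      // T * sleep_factor seconds runs at a duty cycle of
      // marking_task_overhead: with d = marking_task_overhead,
      // T / (T + T * (1 - d) / d) == d.
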
      FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
      _sleep_factor             = sleep_factor;
      _marking_task_overhead    = marking_task_overhead;
    } else {
      // Calculate the number of parallel marking threads by scaling
      // the number of parallel GC threads.
      uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
      FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
      _sleep_factor             = 0.0;
      _marking_task_overhead    = 1.0;
    }

    assert(ConcGCThreads > 0, "Should have been set");
    _parallel_marking_threads = (uint) ConcGCThreads;
    _max_parallel_marking_threads = _parallel_marking_threads;

    if (parallel_marking_threads() > 1) {
      _cleanup_task_overhead = 1.0;
    } else {
      _cleanup_task_overhead = marking_task_overhead();
    }
    _cleanup_sleep_factor =
                     (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();

#if 0
    gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
    gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
    gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
    gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
    gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
#endif

    guarantee(parallel_marking_threads() > 0, "peace of mind");
    _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
         _max_parallel_marking_threads, false, true);
    if (_parallel_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _parallel_workers->initialize_workers();
    }
  }

  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    uintx mark_stack_size =
      MIN2(MarkStackSizeMax,
          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
    // Verify that the calculated value for MarkStackSize is in range.
    // It would be nice to use the private utility routine from Arguments.
    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
              mark_stack_size, (uintx) 1, MarkStackSizeMax);
      return;
    }
    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  } else {
    // Verify MarkStackSize is in range.
    if (FLAG_IS_CMDLINE(MarkStackSize)) {
      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
                  MarkStackSize, (uintx) 1, MarkStackSizeMax);
          return;
        }
      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
                  MarkStackSize, MarkStackSizeMax);
          return;
        }
      }
    }
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CM marking stack");
    return;
  }

  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);

  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);

  BitMap::idx_t card_bm_size = _card_bm.size();

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _active_tasks = _max_worker_id;

  size_t max_regions = (size_t) _g1h->max_regions();
  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* task_queue = new CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);

    _tasks[i] = new CMTask(i, this,
                           _count_marked_bytes[i],
                           &_count_card_bitmaps[i],
                           task_queue, _task_queues);

    _accum_task_vtime[i] = 0.0;
  }

  // Calculate the card number for the bottom of the heap. Used
  // in biasing indexes into the accounting card bitmaps.
  _heap_bottom_card_num =
    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
                                CardTableModRefBS::card_shift);

  // Clear all the liveness counting data
  clear_all_count_data();

  // so that the call below can read a sensible value
  _heap_start = g1h->reserved_region().start();
  set_non_marking_state();
  _completed_initialization = true;
}

void ConcurrentMark::reset() {
  // Starting values for these two. This should be called in a STW
  // phase.
  MemRegion reserved = _g1h->g1_reserved();
  _heap_start = reserved.start();
  _heap_end   = reserved.end();

  // Separated the asserts so that we know which one fires.
  assert(_heap_start != NULL, "heap bounds should look ok");
  assert(_heap_end != NULL, "heap bounds should look ok");
  assert(_heap_start < _heap_end, "heap bounds should look ok");

  // Reset all the marking data structures and any necessary flags
  reset_marking_state();

  if (verbose_low()) {
    gclog_or_tty->print_cr("[global] resetting");
  }

  // We do reset all of them, since different phases will use
  // different number of active threads. So, it's easiest to have all
  // of them ready.
  for (uint i = 0; i < _max_worker_id; ++i) {
    _tasks[i]->reset(_nextMarkBitMap);
  }

  // we need this to make sure that the flag is on during the evac
  // pause with initial mark piggy-backed
  set_concurrent_marking_in_progress();
}

void ConcurrentMark::reset_marking_state(bool clear_overflow) {
  _markStack.set_should_expand();
  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  if (clear_overflow) {
    clear_has_overflown();
  } else {
    assert(has_overflown(), "pre-condition");
  }
  _finger = _heap_start;

  for (uint i = 0; i < _max_worker_id; ++i) {
    CMTaskQueue* queue = _task_queues->queue(i);
    queue->set_empty();
  }
}

void ConcurrentMark::set_concurrency(uint active_tasks) {
  assert(active_tasks <= _max_worker_id, "we should not have more");

  _active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}

void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
  set_concurrency(active_tasks);

  _concurrent = concurrent;
  // We propagate this to all tasks, not just the active ones.
  for (uint i = 0; i < _max_worker_id; ++i)
    _tasks[i]->set_concurrent(concurrent);

  if (concurrent) {
    set_concurrent_marking_in_progress();
  } else {
    // We currently assume that the concurrent flag has been set to
    // false before we start remark. At this point we should also be
    // in a STW phase.
    assert(!concurrent_marking_in_progress(), "invariant");
    assert(out_of_regions(),
           err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                   p2i(_finger), p2i(_heap_end)));
  }
}

void ConcurrentMark::set_non_marking_state() {
  // We set the global marking state to some default values when we're
  // not doing marking.
  reset_marking_state();
  _active_tasks = 0;
  clear_concurrent_marking_in_progress();
}

ConcurrentMark::~ConcurrentMark() {
  // The ConcurrentMark instance is never freed.
  ShouldNotReachHere();
}

void ConcurrentMark::clearNextBitmap() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Make sure that the concurrent mark thread looks to still be in
  // the current cycle.
  guarantee(cmThread()->during_cycle(), "invariant");

  // We are finishing up the current cycle by clearing the next
  // marking bitmap and getting it ready for the next cycle. During
  // this time no other cycle can start. So, let's make sure that this
  // is the case.
  guarantee(!g1h->mark_in_progress(), "invariant");

  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
  g1h->heap_region_iterate(&cl);

  // Clear the liveness counting data. If the marking has been aborted, the abort()
  // call already did that.
  if (cl.complete()) {
    clear_all_count_data();
  }

  // Repeat the asserts from above.
  guarantee(cmThread()->during_cycle(), "invariant");
  guarantee(!g1h->mark_in_progress(), "invariant");
}

class CheckBitmapClearHRClosure : public HeapRegionClosure {
  CMBitMap* _bitmap;
  bool _error;
 public:
  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    // This closure can be called concurrently with the mutators, so we must make
    // sure that the result of the getNextMarkedWordAddress() call is compared to
    // the value passed to it as limit to detect any found bits.
    // We can use the region's orig_end() for the limit and the comparison value
    // as it always contains the "real" end of the region that never changes and
    // has no side effects.
    // Due to the latter, there can also be no problem with the compiler generating
    // reloads of the orig_end() call.
    HeapWord* end = r->orig_end();
    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
  }
};

bool ConcurrentMark::nextMarkBitmapIsClear() {
  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
  _g1h->heap_region_iterate(&cl);
  return cl.complete();
}

class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->note_start_of_marking();
    }
    return false;
  }
};

void ConcurrentMark::checkpointRootsInitialPre() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  _has_aborted = false;

#ifndef PRODUCT
  if (G1PrintReachableAtInitialMark) {
    print_reachable("at-cycle-start",
                    VerifyOption_G1UsePrevMarking, true /* all */);
  }
#endif

  // Initialise marking structures. This has to be done in a STW phase.
  reset();

  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  g1h->heap_region_iterate(&startcl);
}

void ConcurrentMark::checkpointRootsInitialPost() {
  G1CollectedHeap*   g1h = G1CollectedHeap::heap();

  // If we force an overflow during remark, the remark operation will
  // actually abort and we'll restart concurrent marking. If we always
  // force an overflow during remark we'll never actually complete the
  // marking phase. So, we initialize this here, at the start of the
  // cycle, so that the number of remaining forced overflows decreases
  // at every remark and we'll eventually not need to cause one.
  force_overflow_stw()->init();

  // Start Concurrent Marking weak-reference discovery.
  ReferenceProcessor* rp = g1h->ref_processor_cm();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  // This is the start of the marking cycle; we expect all
  // threads to have SATB queues with active set to false.
  satb_mq_set.set_active_all_threads(true, /* new active value */
                                     false /* expected_active */);

  _root_regions.prepare_for_scan();

  // update_g1_committed() will be called at the end of an evac pause
  // when marking is on. So, it's also called at the end of the
  // initial-mark pause to update the heap end, if the heap expands
  // during it. No need to call it here.
}

/*
 * Notice that in the next two methods, we actually leave the STS
 * during the barrier sync and join it immediately afterwards. If we
 * do not do this, the following deadlock can occur: one thread could
 * be in the barrier sync code, waiting for the other thread to also
 * sync up, whereas another one could be trying to yield, while also
 * waiting for the other threads to sync up too.
 *
 * Note, however, that this code is also used during remark and in
 * this case we should not attempt to leave / enter the STS, otherwise
 * we'll either hit an assert (debug / fastdebug) or deadlock
 * (product). So we should only leave / enter the STS if we are
 * operating concurrently.
 *
 * Because the thread that does the sync barrier has left the STS, it
 * is possible for it to be suspended while a Full GC or an evacuation
 * pause occurs. This is actually safe, since entering the sync
 * barrier is one of the last things do_marking_step() does, and it
 * doesn't manipulate any data structures afterwards.
 */

void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_first_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everyone should have synced up and not be doing any
  // more work

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
    }
  }

  if (barrier_aborted) {
    // If the barrier aborted we ignore the overflow condition and
    // just abort the whole marking phase as quickly as possible.
    return;
  }

  // If we're executing the concurrent phase of marking, reset the marking
  // state; otherwise the marking state is reset after reference processing,
  // during the remark pause.
  // If we reset here as a result of an overflow during the remark we will
  // see assertion failures from any subsequent set_concurrency_and_phase()
  // calls.
  if (concurrent()) {
    // let the task associated with worker 0 do this
    if (worker_id == 0) {
      // task 0 is responsible for clearing the global data structures
      // We should be here because of an overflow. During STW we should
      // not clear the overflow flag since we rely on it being true when
      // we exit this method to abort the pause and restart concurrent
      // marking.
      reset_marking_state(true /* clear_overflow */);
      force_overflow()->update();

      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
      }
    }
  }

  // after this, each task should reset its own data structures and
  // then go into the second barrier
}

void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  if (verbose_low()) {
    gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
  }

  if (concurrent()) {
    SuspendibleThreadSet::leave();
  }

  bool barrier_aborted = !_second_overflow_barrier_sync.enter();

  if (concurrent()) {
    SuspendibleThreadSet::join();
  }
  // at this point everything should be re-initialized and ready to go

  if (verbose_low()) {
    if (barrier_aborted) {
      gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
    } else {
      gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
    }
  }
}

#ifndef PRODUCT
void ForceOverflowSettings::init() {
  _num_remaining = G1ConcMarkForceOverflow;
  _force = false;
  update();
}

void ForceOverflowSettings::update() {
  if (_num_remaining > 0) {
    _num_remaining -= 1;
    _force = true;
  } else {
    _force = false;
  }
}

bool ForceOverflowSettings::should_force() {
  if (_force) {
    _force = false;
    return true;
  } else {
    return false;
  }
}
#endif // !PRODUCT

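// The gang task run by the concurrent marking threads: each worker
// repeatedly performs a bounded do_marking_step(), yielding to safepoints
// in between and, when a marking overhead target is set, sleeping between
// steps so that it runs at the requested duty cycle.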
class CMConcurrentMarkingTask: public AbstractGangTask {
private:
  ConcurrentMark*       _cm;
  ConcurrentMarkThread* _cmt;

public:
  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");
    ResourceMark rm;

    double start_vtime = os::elapsedVTime();

    SuspendibleThreadSet::join();

    assert(worker_id < _cm->active_tasks(), "invariant");
    CMTask* the_task = _cm->task(worker_id);
    the_task->record_start_time();
    if (!_cm->has_aborted()) {
      do {
        double start_vtime_sec = os::elapsedVTime();
        double mark_step_duration_ms = G1ConcMarkStepDurationMillis;

        the_task->do_marking_step(mark_step_duration_ms,
                                  true  /* do_termination */,
                                  false /* is_serial*/);

        double end_vtime_sec = os::elapsedVTime();
        double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
        _cm->clear_has_overflown();

        _cm->do_yield_check(worker_id);

        jlong sleep_time_ms;
        if (!_cm->has_aborted() && the_task->has_aborted()) {
          sleep_time_ms =
            (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
          SuspendibleThreadSet::leave();
          os::sleep(Thread::current(), sleep_time_ms, false);
          SuspendibleThreadSet::join();
        }
      } while (!_cm->has_aborted() && the_task->has_aborted());
    }
    the_task->record_end_time();
    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");

    SuspendibleThreadSet::leave();

    double end_vtime = os::elapsedVTime();
    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
  }

  CMConcurrentMarkingTask(ConcurrentMark* cm,
                          ConcurrentMarkThread* cmt) :
      AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }

  ~CMConcurrentMarkingTask() { }
};

// Calculates the number of active workers for a concurrent
// phase.
uint ConcurrentMark::calc_parallel_marking_threads() {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    uint n_conc_workers = 0;
    if (!UseDynamicNumberOfGCThreads ||
        (!FLAG_IS_DEFAULT(ConcGCThreads) &&
         !ForceDynamicNumberOfGCThreads)) {
      n_conc_workers = max_parallel_marking_threads();
    } else {
      n_conc_workers =
        AdaptiveSizePolicy::calc_default_active_workers(
                                     max_parallel_marking_threads(),
                                     1, /* Minimum workers */
                                     parallel_marking_threads(),
                                     Threads::number_of_non_daemon_threads());
      // Don't scale down "n_conc_workers" by scale_parallel_threads() because
      // that scaling has already gone into "_max_parallel_marking_threads".
    }
    assert(n_conc_workers > 0, "Always need at least 1");
    return n_conc_workers;
  }
  // If we are not running with any parallel GC threads we will not
  // have spawned any marking threads either. Hence the number of
  // concurrent workers should be 0.
  return 0;
}

void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
    oop obj = oop(curr);
    int size = obj->oop_iterate(&cl);
    assert(size == obj->size(), "sanity");
    curr += size;
  }
}

class CMRootRegionScanTask : public AbstractGangTask {
private:
  ConcurrentMark* _cm;

public:
  CMRootRegionScanTask(ConcurrentMark* cm) :
    AbstractGangTask("Root Region Scan"), _cm(cm) { }

  void work(uint worker_id) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "this should only be done by a conc GC thread");

    CMRootRegions* root_regions = _cm->root_regions();
    HeapRegion* hr = root_regions->claim_next();
    while (hr != NULL) {
      _cm->scanRootRegion(hr, worker_id);
      hr = root_regions->claim_next();
    }
  }
};

void ConcurrentMark::scanRootRegions() {
  // Start of concurrent marking.
  ClassLoaderDataGraph::clear_claimed_marks();

  // scan_in_progress() will have been set to true only if there was
  // at least one root region to scan. So, if it's false, we
  // should not attempt to do any further work.
  if (root_regions()->scan_in_progress()) {
    _parallel_marking_threads = calc_parallel_marking_threads();
    assert(parallel_marking_threads() <= max_parallel_marking_threads(),
           "Maximum number of marking threads exceeded");
    uint active_workers = MAX2(1U, parallel_marking_threads());

    CMRootRegionScanTask task(this);
    if (use_parallel_marking_threads()) {
      _parallel_workers->set_active_workers((int) active_workers);
      _parallel_workers->run_task(&task);
    } else {
      task.work(0);
    }

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void ConcurrentMark::markFromRoots() {
  // we might be tempted to assert that:
  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  //        "inconsistent argument?");
  // However that wouldn't be right, because it's possible that
  // a safepoint is indeed in progress as a younger generation
  // stop-the-world GC happens even as we mark in this generation.

  _restart_for_overflow = false;
  force_overflow_conc()->init();

  // _g1h has _n_par_threads
  _parallel_marking_threads = calc_parallel_marking_threads();
  assert(parallel_marking_threads() <= max_parallel_marking_threads(),
    "Maximum number of marking threads exceeded");

  uint active_workers = MAX2(1U, parallel_marking_threads());

  // Parallel task terminator is set in "set_concurrency_and_phase()"
  set_concurrency_and_phase(active_workers, true /* concurrent */);

  CMConcurrentMarkingTask markingTask(this, cmThread());
  if (use_parallel_marking_threads()) {
    _parallel_workers->set_active_workers((int)active_workers);
    // Don't set _n_par_threads because it affects MT in process_roots()
    // and the decisions on that MT processing are made elsewhere.
    assert(_parallel_workers->active_workers() > 0, "Should have been set");
    _parallel_workers->run_task(&markingTask);
  } else {
    markingTask.work(0);
  }
  print_stats();
}

void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  // world is stopped at this checkpoint
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    g1h->set_marking_complete(); // So bitmap clearing isn't confused
    return;
  }

  SvcGCMarker sgcm(SvcGCMarker::OTHER);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    Universe::heap()->prepare_for_verify();
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)");
  }
  g1h->check_bitmaps("Remark Start");

  G1CollectorPolicy* g1p = g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  checkpointRootsFinalWork();

  double mark_work_end = os::elapsedTime();

  weakRefsWork(clear_all_soft_refs);

  if (has_overflown()) {
    // Oops.  We overflowed.  Restart concurrent marking.
    _restart_for_overflow = true;
    if (G1TraceMarkStackOverflow) {
      gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
    }

    // Verify the heap w.r.t. the previous marking bitmap.
    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UsePrevMarking,
                       " VerifyDuringGC:(overflow)");
    }

    // Clear the marking state because we will be restarting
    // marking due to overflowing the global mark stack.
    reset_marking_state();
  } else {
    // Aggregate the per-task counting data that we have accumulated
    // while marking.
    aggregate_count_data();

    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    // We're done with marking.
    // This is the end of the marking cycle; we expect all
    // threads to have SATB queues with active set to true.
    satb_mq_set.set_active_all_threads(false, /* new active value */
                                       true /* expected_active */);

    if (VerifyDuringGC) {
      HandleMark hm;  // handle scope
      Universe::heap()->prepare_for_verify();
      Universe::verify(VerifyOption_G1UseNextMarking,
                       " VerifyDuringGC:(after)");
    }
    g1h->check_bitmaps("Remark End");
    assert(!restart_for_overflow(), "sanity");
    // Completely reset the marking state since marking completed
    set_non_marking_state();
  }

  // Expand the marking stack, if we have to and if we can.
  if (_markStack.should_expand()) {
    _markStack.expand();
  }

  // Statistics
  double now = os::elapsedTime();
  _remark_mark_times.add((mark_work_end - start) * 1000.0);
  _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  _remark_times.add((now - start) * 1000.0);

  g1p->record_concurrent_mark_remark_end();

  G1CMIsAliveClosure is_alive(g1h);
  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}

  1383 // Base class of the closures that finalize and verify the
  1384 // liveness counting data.
  1385 class CMCountDataClosureBase: public HeapRegionClosure {
  1386 protected:
  1387   G1CollectedHeap* _g1h;
  1388   ConcurrentMark* _cm;
  1389   CardTableModRefBS* _ct_bs;
  1391   BitMap* _region_bm;
  1392   BitMap* _card_bm;
  1394   // Takes a region that's not empty (i.e., it has at least one
  1395   // live object in it and sets its corresponding bit on the region
  1396   // bitmap to 1. If the region is "starts humongous" it will also set
  1397   // to 1 the bits on the region bitmap that correspond to its
  1398   // associated "continues humongous" regions.
  1399   void set_bit_for_region(HeapRegion* hr) {
  1400     assert(!hr->continuesHumongous(), "should have filtered those out");
  1402     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
  1403     if (!hr->startsHumongous()) {
  1404       // Normal (non-humongous) case: just set the bit.
  1405       _region_bm->par_at_put(index, true);
  1406     } else {
  1407       // Starts humongous case: calculate how many regions are part of
  1408       // this humongous region and then set the bit range.
  1409       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
  1410       _region_bm->par_at_put_range(index, end_index, true);
  1414 public:
  1415   CMCountDataClosureBase(G1CollectedHeap* g1h,
  1416                          BitMap* region_bm, BitMap* card_bm):
  1417     _g1h(g1h), _cm(g1h->concurrent_mark()),
  1418     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
  1419     _region_bm(region_bm), _card_bm(card_bm) { }
  1420 };
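       // Note on the counting bitmaps: _region_bm has one bit per heap region
       // (set when the region contains any live data) and _card_bm has one bit
       // per card (set when the card is spanned by a live object or by the
       // implicitly live range [NTAMS, top)). Both bitmaps can be updated by
       // several workers at once, hence the par_at_put() style calls above.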
  1422 // Closure that calculates the # live bytes per region. Used
  1423 // for verification purposes during the cleanup pause.
  1424 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  1425   CMBitMapRO* _bm;
  1426   size_t _region_marked_bytes;
  1428 public:
  1429   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
  1430                          BitMap* region_bm, BitMap* card_bm) :
  1431     CMCountDataClosureBase(g1h, region_bm, card_bm),
  1432     _bm(bm), _region_marked_bytes(0) { }
  1434   bool doHeapRegion(HeapRegion* hr) {
  1436     if (hr->continuesHumongous()) {
  1437       // We will ignore these here and process them when their
  1438       // associated "starts humongous" region is processed (see
  1439   // set_bit_for_region()). Note that we cannot rely on their
  1440       // associated "starts humongous" region to have their bit set to
  1441       // 1 since, due to the region chunking in the parallel region
  1442       // iteration, a "continues humongous" region might be visited
  1443       // before its associated "starts humongous".
  1444       return false;
  1447     HeapWord* ntams = hr->next_top_at_mark_start();
  1448     HeapWord* start = hr->bottom();
  1450     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
  1451            err_msg("Preconditions not met - "
  1452                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
  1453                    p2i(start), p2i(ntams), p2i(hr->end())));
  1455     // Find the first marked object at or after "start".
  1456     start = _bm->getNextMarkedWordAddress(start, ntams);
  1458     size_t marked_bytes = 0;
  1460     while (start < ntams) {
  1461       oop obj = oop(start);
  1462       int obj_sz = obj->size();
  1463       HeapWord* obj_end = start + obj_sz;
  1465       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
  1466       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
  1468       // Note: if we're looking at the last region in the heap, obj_end
  1469       // could actually be just beyond the end of the heap; end_idx
  1470       // will then correspond to a (non-existent) card that is also
  1471       // just beyond the heap.
  1472       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
  1473         // end of object is not card aligned - increment to cover
  1474         // all the cards spanned by the object
  1475         end_idx += 1;
  1478       // Set the bits in the card BM for the cards spanned by this object.
  1479       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
  1481       // Add the size of this object to the number of marked bytes.
  1482       marked_bytes += (size_t)obj_sz * HeapWordSize;
  1484       // Find the next marked object after this one.
  1485       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
  1488     // Mark the allocated-since-marking portion...
  1489     HeapWord* top = hr->top();
  1490     if (ntams < top) {
  1491       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
  1492       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
  1494       // Note: if we're looking at the last region in the heap, top
  1495       // could actually be just beyond the end of the heap; end_idx
  1496       // will then correspond to a (non-existent) card that is also
  1497       // just beyond the heap.
  1498       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
  1499         // top is not card aligned - increment end_idx to cover
  1500         // all the cards spanned by the [ntams, top) range
  1501         end_idx += 1;
  1503       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
  1505       // This definitely means the region has live objects.
  1506       set_bit_for_region(hr);
  1509     // Update the live region bitmap.
  1510     if (marked_bytes > 0) {
  1511       set_bit_for_region(hr);
  1514     // Set the marked bytes for the current region so that
  1515     // it can be queried by a calling verification routine.
  1516     _region_marked_bytes = marked_bytes;
  1518     return false;
  1521   size_t region_marked_bytes() const { return _region_marked_bytes; }
  1522 };
  1524 // Heap region closure used for verifying the counting data
  1525 // that was accumulated concurrently and aggregated during
  1526 // the remark pause. This closure is applied to the heap
  1527 // regions during the STW cleanup pause.
  1529 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  1530   G1CollectedHeap* _g1h;
  1531   ConcurrentMark* _cm;
  1532   CalcLiveObjectsClosure _calc_cl;
  1533   BitMap* _region_bm;   // Region BM to be verified
  1534   BitMap* _card_bm;     // Card BM to be verified
  1535   bool _verbose;        // verbose output?
  1537   BitMap* _exp_region_bm; // Expected Region BM values
  1538   BitMap* _exp_card_bm;   // Expected card BM values
  1540   int _failures;
  1542 public:
  1543   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
  1544                                 BitMap* region_bm,
  1545                                 BitMap* card_bm,
  1546                                 BitMap* exp_region_bm,
  1547                                 BitMap* exp_card_bm,
  1548                                 bool verbose) :
  1549     _g1h(g1h), _cm(g1h->concurrent_mark()),
  1550     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
  1551     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
  1552     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
  1553     _failures(0) { }
  1555   int failures() const { return _failures; }
  1557   bool doHeapRegion(HeapRegion* hr) {
  1558     if (hr->continuesHumongous()) {
  1559       // We will ignore these here and process them when their
  1560       // associated "starts humongous" region is processed (see
  1561   // set_bit_for_region()). Note that we cannot rely on their
  1562       // associated "starts humongous" region to have their bit set to
  1563       // 1 since, due to the region chunking in the parallel region
  1564       // iteration, a "continues humongous" region might be visited
  1565       // before its associated "starts humongous".
  1566       return false;
  1569     int failures = 0;
  1571     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
  1572     // this region and set the corresponding bits in the expected region
  1573     // and card bitmaps.
  1574     bool res = _calc_cl.doHeapRegion(hr);
  1575     assert(res == false, "should be continuing");
  1577     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
  1578                     Mutex::_no_safepoint_check_flag);
  1580     // Verify the marked bytes for this region.
  1581     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
  1582     size_t act_marked_bytes = hr->next_marked_bytes();
  1584     // We're not OK if expected marked bytes > actual marked bytes. It means
  1585     // we have missed accounting for some objects during the actual marking.
  1586     if (exp_marked_bytes > act_marked_bytes) {
  1587       if (_verbose) {
  1588         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
  1589                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
  1590                                hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
  1592       failures += 1;
  1595     // Verify the bit, for this region, in the actual and expected
  1596     // (which was just calculated) region bit maps.
  1597     // We're not OK if the bit in the calculated expected region
  1598     // bitmap is set and the bit in the actual region bitmap is not.
  1599     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
  1601     bool expected = _exp_region_bm->at(index);
  1602     bool actual = _region_bm->at(index);
  1603     if (expected && !actual) {
  1604       if (_verbose) {
  1605         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
  1606                                "expected: %s, actual: %s",
  1607                                hr->hrm_index(),
  1608                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
  1610       failures += 1;
  1613     // Verify that the card bit maps for the cards spanned by the current
  1614     // region match. We have an error if we have a set bit in the expected
  1615     // bit map and the corresponding bit in the actual bitmap is not set.
  1617     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
  1618     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
  1620     for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
  1621       expected = _exp_card_bm->at(i);
  1622       actual = _card_bm->at(i);
  1624       if (expected && !actual) {
  1625         if (_verbose) {
  1626           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
  1627                                  "expected: %s, actual: %s",
  1628                                  hr->hrm_index(), i,
  1629                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
  1631         failures += 1;
  1635     if (failures > 0 && _verbose)  {
  1636       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
  1637                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
  1638                              HR_FORMAT_PARAMS(hr), p2i(hr->next_top_at_mark_start()),
  1639                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
  1642     _failures += failures;
  1644     // We could stop iteration over the heap when we
  1645     // find the first violating region by returning true.
  1646     return false;
  1648 };
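       // Gang task that applies VerifyLiveObjectDataHRClosure to every heap
       // region, chunked across the workers when parallel GC threads are in
       // use, and atomically accumulates the per-worker failure counts.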
  1650 class G1ParVerifyFinalCountTask: public AbstractGangTask {
  1651 protected:
  1652   G1CollectedHeap* _g1h;
  1653   ConcurrentMark* _cm;
  1654   BitMap* _actual_region_bm;
  1655   BitMap* _actual_card_bm;
  1657   uint    _n_workers;
  1659   BitMap* _expected_region_bm;
  1660   BitMap* _expected_card_bm;
  1662   int  _failures;
  1663   bool _verbose;
  1665 public:
  1666   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
  1667                             BitMap* region_bm, BitMap* card_bm,
  1668                             BitMap* expected_region_bm, BitMap* expected_card_bm)
  1669     : AbstractGangTask("G1 verify final counting"),
  1670       _g1h(g1h), _cm(_g1h->concurrent_mark()),
  1671       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
  1672       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
  1673       _failures(0), _verbose(false),
  1674       _n_workers(0) {
  1675     assert(VerifyDuringGC, "don't call this otherwise");
  1677     // Use the value already set as the number of active threads
  1678     // in the call to run_task().
  1679     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1680       assert( _g1h->workers()->active_workers() > 0,
  1681         "Should have been previously set");
  1682       _n_workers = _g1h->workers()->active_workers();
  1683     } else {
  1684       _n_workers = 1;
  1687     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
  1688     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
  1690     _verbose = _cm->verbose_medium();
  1693   void work(uint worker_id) {
  1694     assert(worker_id < _n_workers, "invariant");
  1696     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
  1697                                             _actual_region_bm, _actual_card_bm,
  1698                                             _expected_region_bm,
  1699                                             _expected_card_bm,
  1700                                             _verbose);
  1702     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1703       _g1h->heap_region_par_iterate_chunked(&verify_cl,
  1704                                             worker_id,
  1705                                             _n_workers,
  1706                                             HeapRegion::VerifyCountClaimValue);
  1707     } else {
  1708       _g1h->heap_region_iterate(&verify_cl);
  1711     Atomic::add(verify_cl.failures(), &_failures);
  1714   int failures() const { return _failures; }
  1715 };
  1717 // Closure that finalizes the liveness counting data.
  1718 // Used during the cleanup pause.
  1719 // Sets the bits corresponding to the interval [NTAMS, top)
  1720 // (which contains the implicitly live objects) in the
  1721 // card liveness bitmap. Also sets the bit for each region,
  1722 // containing live data, in the region liveness bitmap.
  1724 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
  1725  public:
  1726   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
  1727                               BitMap* region_bm,
  1728                               BitMap* card_bm) :
  1729     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
  1731   bool doHeapRegion(HeapRegion* hr) {
  1733     if (hr->continuesHumongous()) {
  1734       // We will ignore these here and process them when their
  1735       // associated "starts humongous" region is processed (see
  1736   // set_bit_for_region()). Note that we cannot rely on their
  1737       // associated "starts humongous" region to have their bit set to
  1738       // 1 since, due to the region chunking in the parallel region
  1739       // iteration, a "continues humongous" region might be visited
  1740       // before its associated "starts humongous".
  1741       return false;
  1744     HeapWord* ntams = hr->next_top_at_mark_start();
  1745     HeapWord* top   = hr->top();
  1747     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
  1749     // Mark the allocated-since-marking portion...
  1750     if (ntams < top) {
  1751       // This definitely means the region has live objects.
  1752       set_bit_for_region(hr);
  1754       // Now set the bits in the card bitmap for [ntams, top)
  1755       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
  1756       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
  1758       // Note: if we're looking at the last region in heap - top
  1759       // could be actually just beyond the end of the heap; end_idx
  1760       // will then correspond to a (non-existent) card that is also
  1761       // just beyond the heap.
  1762       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
  1763         // top is not card aligned - increment end_idx to cover
  1764         // all the cards spanned by the [ntams, top) range
  1765         end_idx += 1;
  1768       assert(end_idx <= _card_bm->size(),
  1769              err_msg("oob: end_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
  1770                      end_idx, _card_bm->size()));
  1771       assert(start_idx < _card_bm->size(),
  1772              err_msg("oob: start_idx=  "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
  1773                      start_idx, _card_bm->size()));
  1775       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
  1778     // Set the bit for the region if it contains live data
  1779     if (hr->next_marked_bytes() > 0) {
  1780       set_bit_for_region(hr);
  1783     return false;
  1785 };
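       // Gang task that runs FinalCountDataUpdateClosure over the heap during
       // the cleanup pause, in parallel when a work gang is available.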
  1787 class G1ParFinalCountTask: public AbstractGangTask {
  1788 protected:
  1789   G1CollectedHeap* _g1h;
  1790   ConcurrentMark* _cm;
  1791   BitMap* _actual_region_bm;
  1792   BitMap* _actual_card_bm;
  1794   uint    _n_workers;
  1796 public:
  1797   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
  1798     : AbstractGangTask("G1 final counting"),
  1799       _g1h(g1h), _cm(_g1h->concurrent_mark()),
  1800       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
  1801       _n_workers(0) {
  1802     // Use the value already set as the number of active threads
  1803     // in the call to run_task().
  1804     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1805       assert( _g1h->workers()->active_workers() > 0,
  1806         "Should have been previously set");
  1807       _n_workers = _g1h->workers()->active_workers();
  1808     } else {
  1809       _n_workers = 1;
  1813   void work(uint worker_id) {
  1814     assert(worker_id < _n_workers, "invariant");
  1816     FinalCountDataUpdateClosure final_update_cl(_g1h,
  1817                                                 _actual_region_bm,
  1818                                                 _actual_card_bm);
  1820     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1821       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
  1822                                             worker_id,
  1823                                             _n_workers,
  1824                                             HeapRegion::FinalCountClaimValue);
  1825     } else {
  1826       _g1h->heap_region_iterate(&final_update_cl);
  1829 };
  1831 class G1ParNoteEndTask;
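       // Closure applied to every region at the end of marking: it records the
       // marking information for the region, frees regions that contain no live
       // data (collecting them on a local cleanup list), and gathers per-region
       // timing statistics.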
  1833 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  1834   G1CollectedHeap* _g1;
  1835   size_t _max_live_bytes;
  1836   uint _regions_claimed;
  1837   size_t _freed_bytes;
  1838   FreeRegionList* _local_cleanup_list;
  1839   HeapRegionSetCount _old_regions_removed;
  1840   HeapRegionSetCount _humongous_regions_removed;
  1841   HRRSCleanupTask* _hrrs_cleanup_task;
  1842   double _claimed_region_time;
  1843   double _max_region_time;
  1845 public:
  1846   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
  1847                              FreeRegionList* local_cleanup_list,
  1848                              HRRSCleanupTask* hrrs_cleanup_task) :
  1849     _g1(g1),
  1850     _max_live_bytes(0), _regions_claimed(0),
  1851     _freed_bytes(0),
  1852     _claimed_region_time(0.0), _max_region_time(0.0),
  1853     _local_cleanup_list(local_cleanup_list),
  1854     _old_regions_removed(),
  1855     _humongous_regions_removed(),
  1856     _hrrs_cleanup_task(hrrs_cleanup_task) { }
  1858   size_t freed_bytes() { return _freed_bytes; }
  1859   const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; }
  1860   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
  1862   bool doHeapRegion(HeapRegion *hr) {
  1863     if (hr->continuesHumongous()) {
  1864       return false;
  1866     // We use a claim value of zero here because all regions
  1867     // were claimed with value 1 in the FinalCount task.
  1868     _g1->reset_gc_time_stamps(hr);
  1869     double start = os::elapsedTime();
  1870     _regions_claimed++;
  1871     hr->note_end_of_marking();
  1872     _max_live_bytes += hr->max_live_bytes();
  1874     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
  1875       _freed_bytes += hr->used();
  1876       hr->set_containing_set(NULL);
  1877       if (hr->isHumongous()) {
  1878         assert(hr->startsHumongous(), "we should only see starts humongous");
  1879         _humongous_regions_removed.increment(1u, hr->capacity());
  1880         _g1->free_humongous_region(hr, _local_cleanup_list, true);
  1881       } else {
  1882         _old_regions_removed.increment(1u, hr->capacity());
  1883         _g1->free_region(hr, _local_cleanup_list, true);
  1885     } else {
  1886       hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
  1889     double region_time = (os::elapsedTime() - start);
  1890     _claimed_region_time += region_time;
  1891     if (region_time > _max_region_time) {
  1892       _max_region_time = region_time;
  1894     return false;
  1897   size_t max_live_bytes() { return _max_live_bytes; }
  1898   uint regions_claimed() { return _regions_claimed; }
  1899   double claimed_region_time_sec() { return _claimed_region_time; }
  1900   double max_region_time_sec() { return _max_region_time; }
  1901 };
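       // Gang task that applies G1NoteEndOfConcMarkClosure to the heap and then
       // merges each worker's local cleanup list and statistics into the global
       // state, taking ParGCRareEvent_lock for the shared updates.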
  1903 class G1ParNoteEndTask: public AbstractGangTask {
  1904   friend class G1NoteEndOfConcMarkClosure;
  1906 protected:
  1907   G1CollectedHeap* _g1h;
  1908   size_t _max_live_bytes;
  1909   size_t _freed_bytes;
  1910   FreeRegionList* _cleanup_list;
  1912 public:
  1913   G1ParNoteEndTask(G1CollectedHeap* g1h,
  1914                    FreeRegionList* cleanup_list) :
  1915     AbstractGangTask("G1 note end"), _g1h(g1h),
  1916     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
  1918   void work(uint worker_id) {
  1919     double start = os::elapsedTime();
  1920     FreeRegionList local_cleanup_list("Local Cleanup List");
  1921     HRRSCleanupTask hrrs_cleanup_task;
  1922     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
  1923                                            &hrrs_cleanup_task);
  1924     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1925       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
  1926                                             _g1h->workers()->active_workers(),
  1927                                             HeapRegion::NoteEndClaimValue);
  1928     } else {
  1929       _g1h->heap_region_iterate(&g1_note_end);
  1931     assert(g1_note_end.complete(), "Shouldn't have yielded!");
  1933     // Now update the lists
  1934     _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
  1936       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  1937       _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
  1938       _max_live_bytes += g1_note_end.max_live_bytes();
  1939       _freed_bytes += g1_note_end.freed_bytes();
  1941       // If we iterate over the global cleanup list at the end of
  1942       // cleanup to do this printing we cannot guarantee that we only
  1943       // generate output for the newly-reclaimed regions (the list
  1944       // might not be empty at the beginning of cleanup; we might
  1945       // still be working on its previous contents). So we do the
  1946       // printing here, before we append the new regions to the global
  1947       // cleanup list.
  1949       G1HRPrinter* hr_printer = _g1h->hr_printer();
  1950       if (hr_printer->is_active()) {
  1951         FreeRegionListIterator iter(&local_cleanup_list);
  1952         while (iter.more_available()) {
  1953           HeapRegion* hr = iter.get_next();
  1954           hr_printer->cleanup(hr);
  1958       _cleanup_list->add_ordered(&local_cleanup_list);
  1959       assert(local_cleanup_list.is_empty(), "post-condition");
  1961       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
  1964   size_t max_live_bytes() { return _max_live_bytes; }
  1965   size_t freed_bytes() { return _freed_bytes; }
  1966 };
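       // Gang task that scrubs the remembered sets, using the liveness
       // information in the region and card bitmaps to drop entries that
       // refer to dead data.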
  1968 class G1ParScrubRemSetTask: public AbstractGangTask {
  1969 protected:
  1970   G1RemSet* _g1rs;
  1971   BitMap* _region_bm;
  1972   BitMap* _card_bm;
  1973 public:
  1974   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
  1975                        BitMap* region_bm, BitMap* card_bm) :
  1976     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
  1977     _region_bm(region_bm), _card_bm(card_bm) { }
  1979   void work(uint worker_id) {
  1980     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1981       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
  1982                        HeapRegion::ScrubRemSetClaimValue);
  1983     } else {
  1984       _g1rs->scrub(_region_bm, _card_bm);
  1988 };
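       // The STW cleanup pause: finalize the liveness counting data (verifying
       // it if requested), swap the mark bitmaps, note the end of marking in
       // every region - freeing completely empty ones - optionally scrub the
       // remembered sets, and record the cleanup statistics.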
  1990 void ConcurrentMark::cleanup() {
  1991   // world is stopped at this checkpoint
  1992   assert(SafepointSynchronize::is_at_safepoint(),
  1993          "world should be stopped");
  1994   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1996   // If a full collection has happened, we shouldn't do this.
  1997   if (has_aborted()) {
  1998     g1h->set_marking_complete(); // So bitmap clearing isn't confused
  1999     return;
  2002   g1h->verify_region_sets_optional();
  2004   if (VerifyDuringGC) {
  2005     HandleMark hm;  // handle scope
  2006     Universe::heap()->prepare_for_verify();
  2007     Universe::verify(VerifyOption_G1UsePrevMarking,
  2008                      " VerifyDuringGC:(before)");
  2010   g1h->check_bitmaps("Cleanup Start");
  2012   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
  2013   g1p->record_concurrent_mark_cleanup_start();
  2015   double start = os::elapsedTime();
  2017   HeapRegionRemSet::reset_for_cleanup_tasks();
  2019   uint n_workers;
  2021   // Do counting once more with the world stopped for good measure.
  2022   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
  2024   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2025     assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2026            "sanity check");
  2028     g1h->set_par_threads();
  2029     n_workers = g1h->n_par_threads();
  2030     assert(g1h->n_par_threads() == n_workers,
  2031            "Should not have been reset");
  2032     g1h->workers()->run_task(&g1_par_count_task);
  2033     // Done with the parallel phase so reset to 0.
  2034     g1h->set_par_threads(0);
  2036     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
  2037            "sanity check");
  2038   } else {
  2039     n_workers = 1;
  2040     g1_par_count_task.work(0);
  2043   if (VerifyDuringGC) {
  2044     // Verify that the counting data accumulated during marking matches
  2045     // that calculated by walking the marking bitmap.
  2047     // Bitmaps to hold expected values
  2048     BitMap expected_region_bm(_region_bm.size(), true);
  2049     BitMap expected_card_bm(_card_bm.size(), true);
  2051     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
  2052                                                  &_region_bm,
  2053                                                  &_card_bm,
  2054                                                  &expected_region_bm,
  2055                                                  &expected_card_bm);
  2057     if (G1CollectedHeap::use_parallel_gc_threads()) {
  2058       g1h->set_par_threads((int)n_workers);
  2059       g1h->workers()->run_task(&g1_par_verify_task);
  2060       // Done with the parallel phase so reset to 0.
  2061       g1h->set_par_threads(0);
  2063       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
  2064              "sanity check");
  2065     } else {
  2066       g1_par_verify_task.work(0);
  2069     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  2072   size_t start_used_bytes = g1h->used();
  2073   g1h->set_marking_complete();
  2075   double count_end = os::elapsedTime();
  2076   double this_final_counting_time = (count_end - start);
  2077   _total_counting_time += this_final_counting_time;
  2079   if (G1PrintRegionLivenessInfo) {
  2080     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
  2081     _g1h->heap_region_iterate(&cl);
  2084   // Install newly created mark bitMap as "prev".
  2085   swapMarkBitMaps();
  2087   g1h->reset_gc_time_stamp();
  2089   // Note end of marking in all heap regions.
  2090   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
  2091   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2092     g1h->set_par_threads((int)n_workers);
  2093     g1h->workers()->run_task(&g1_par_note_end_task);
  2094     g1h->set_par_threads(0);
  2096     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
  2097            "sanity check");
  2098   } else {
  2099     g1_par_note_end_task.work(0);
  2101   g1h->check_gc_time_stamps();
  2103   if (!cleanup_list_is_empty()) {
  2104     // The cleanup list is not empty, so we'll have to process it
  2105     // concurrently. Notify anyone else that might be wanting free
  2106     // regions that there will be more free regions coming soon.
  2107     g1h->set_free_regions_coming();
  2110   // Note: rem set scrubbing must precede the record_concurrent_mark_cleanup_end()
  2111   // call below, since it affects the metric by which we sort the heap regions.
  2112   if (G1ScrubRemSets) {
  2113     double rs_scrub_start = os::elapsedTime();
  2114     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
  2115     if (G1CollectedHeap::use_parallel_gc_threads()) {
  2116       g1h->set_par_threads((int)n_workers);
  2117       g1h->workers()->run_task(&g1_par_scrub_rs_task);
  2118       g1h->set_par_threads(0);
  2120       assert(g1h->check_heap_region_claim_values(
  2121                                             HeapRegion::ScrubRemSetClaimValue),
  2122              "sanity check");
  2123     } else {
  2124       g1_par_scrub_rs_task.work(0);
  2127     double rs_scrub_end = os::elapsedTime();
  2128     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
  2129     _total_rs_scrub_time += this_rs_scrub_time;
  2132   // The call below will also free any regions totally full of garbage
  2133   // objects, and sort the regions.
  2134   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
  2136   // Statistics.
  2137   double end = os::elapsedTime();
  2138   _cleanup_times.add((end - start) * 1000.0);
  2140   if (G1Log::fine()) {
  2141     g1h->print_size_transition(gclog_or_tty,
  2142                                start_used_bytes,
  2143                                g1h->used(),
  2144                                g1h->capacity());
  2147   // Clean up will have freed any regions completely full of garbage.
  2148   // Update the soft reference policy with the new heap occupancy.
  2149   Universe::update_heap_info_at_gc();
  2151   if (VerifyDuringGC) {
  2152     HandleMark hm;  // handle scope
  2153     Universe::heap()->prepare_for_verify();
  2154     Universe::verify(VerifyOption_G1UsePrevMarking,
  2155                      " VerifyDuringGC:(after)");
  2157   g1h->check_bitmaps("Cleanup End");
  2159   g1h->verify_region_sets_optional();
  2161   // We need to make this be a "collection" so any collection pause that
  2162   // races with it goes around and waits for completeCleanup to finish.
  2163   g1h->increment_total_collections();
  2165   // Clean out dead classes and update Metaspace sizes.
  2166   if (ClassUnloadingWithConcurrentMark) {
  2167     ClassLoaderDataGraph::purge();
  2169   MetaspaceGC::compute_new_size();
  2171   // We reclaimed old regions so we should calculate the sizes to make
  2172   // sure we update the old gen/space data.
  2173   g1h->g1mm()->update_sizes();
  2175   g1h->trace_heap_after_concurrent_cycle();
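       // Concurrent follow-up to the cleanup pause: clear the regions on the
       // cleanup list and move them to the secondary free list in batches of
       // G1SecondaryFreeListAppendLength, notifying any waiters on
       // SecondaryFreeList_lock as each batch is appended.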
  2178 void ConcurrentMark::completeCleanup() {
  2179   if (has_aborted()) return;
  2181   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  2183   _cleanup_list.verify_optional();
  2184   FreeRegionList tmp_free_list("Tmp Free List");
  2186   if (G1ConcRegionFreeingVerbose) {
  2187     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
  2188                            "cleanup list has %u entries",
  2189                            _cleanup_list.length());
  2192   // No one else should be accessing the _cleanup_list at this point,
  2193   // so it is not necessary to take any locks
  2194   while (!_cleanup_list.is_empty()) {
  2195     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
  2196     assert(hr != NULL, "Got NULL from a non-empty list");
  2197     hr->par_clear();
  2198     tmp_free_list.add_ordered(hr);
  2200     // Instead of adding one region at a time to the secondary_free_list,
  2201     // we accumulate them in the local list and move them a few at a
  2202     // time. This also cuts down on the number of notify_all() calls
  2203     // we do during this process. We'll also append the local list when
  2204     // _cleanup_list is empty (which means we just removed the last
  2205     // region from the _cleanup_list).
  2206     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
  2207         _cleanup_list.is_empty()) {
  2208       if (G1ConcRegionFreeingVerbose) {
  2209         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
  2210                                "appending %u entries to the secondary_free_list, "
  2211                                "cleanup list still has %u entries",
  2212                                tmp_free_list.length(),
  2213                                _cleanup_list.length());
  2217         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  2218         g1h->secondary_free_list_add(&tmp_free_list);
  2219         SecondaryFreeList_lock->notify_all();
  2222       if (G1StressConcRegionFreeing) {
  2223         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
  2224           os::sleep(Thread::current(), (jlong) 1, false);
  2229   assert(tmp_free_list.is_empty(), "post-condition");
  2232 // Supporting Object and Oop closures for reference discovery
  2233 // and processing during marking.
  2235 bool G1CMIsAliveClosure::do_object_b(oop obj) {
  2236   HeapWord* addr = (HeapWord*)obj;
  2237   return addr != NULL &&
  2238          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
  2241 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
  2242 // Uses the CMTask associated with a worker thread (for serial reference
  2243 // processing the CMTask for worker 0 is used) to preserve (mark) and
  2244 // trace referent objects.
  2245 //
  2246 // Using the CMTask and embedded local queues avoids having the worker
  2247 // threads operating on the global mark stack. This reduces the risk
  2248 // of overflowing the stack - which we would rather avoid at this late
  2249 // stage. Also, using the tasks' local queues removes the potential
  2250 // for the workers to interfere with each other, as could occur if
  2251 // they operated on the global stack.
  2253 class G1CMKeepAliveAndDrainClosure: public OopClosure {
  2254   ConcurrentMark* _cm;
  2255   CMTask*         _task;
  2256   int             _ref_counter_limit;
  2257   int             _ref_counter;
  2258   bool            _is_serial;
  2259  public:
  2260   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
  2261     _cm(cm), _task(task), _is_serial(is_serial),
  2262     _ref_counter_limit(G1RefProcDrainInterval) {
  2263     assert(_ref_counter_limit > 0, "sanity");
  2264     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  2265     _ref_counter = _ref_counter_limit;
  2268   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  2269   virtual void do_oop(      oop* p) { do_oop_work(p); }
  2271   template <class T> void do_oop_work(T* p) {
  2272     if (!_cm->has_overflown()) {
  2273       oop obj = oopDesc::load_decode_heap_oop(p);
  2274       if (_cm->verbose_high()) {
  2275         gclog_or_tty->print_cr("\t[%u] we're looking at location "
  2276                                "*"PTR_FORMAT" = "PTR_FORMAT,
  2277                                _task->worker_id(), p2i(p), p2i((void*) obj));
  2280       _task->deal_with_reference(obj);
  2281       _ref_counter--;
  2283       if (_ref_counter == 0) {
  2284         // We have dealt with _ref_counter_limit references, pushing them
  2285         // and objects reachable from them on to the local stack (and
  2286         // possibly the global stack). Call CMTask::do_marking_step() to
  2287         // process these entries.
  2288         //
  2289         // We call CMTask::do_marking_step() in a loop, which we'll exit if
  2290         // there's nothing more to do (i.e. we're done with the entries that
  2291         // were pushed as a result of the CMTask::deal_with_reference() calls
  2292         // above) or we overflow.
  2293         //
  2294         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
  2295         // flag while there may still be some work to do. (See the comment at
  2296         // the beginning of CMTask::do_marking_step() for those conditions -
  2297         // one of which is reaching the specified time target.) It is only
  2298         // when CMTask::do_marking_step() returns without setting the
  2299         // has_aborted() flag that the marking step has completed.
  2300         do {
  2301           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
  2302           _task->do_marking_step(mark_step_duration_ms,
  2303                                  false      /* do_termination */,
  2304                                  _is_serial);
  2305         } while (_task->has_aborted() && !_cm->has_overflown());
  2306         _ref_counter = _ref_counter_limit;
  2308     } else {
  2309       if (_cm->verbose_high()) {
  2310          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
  2314 };
  2316 // 'Drain' oop closure used by both serial and parallel reference processing.
  2317 // Uses the CMTask associated with a given worker thread (for serial
  2318 // reference processing the CMTask for worker 0 is used). Calls the
  2319 // do_marking_step routine, with an unbelievably large timeout value,
  2320 // to drain the marking data structures of the remaining entries
  2321 // added by the 'keep alive' oop closure above.
  2323 class G1CMDrainMarkingStackClosure: public VoidClosure {
  2324   ConcurrentMark* _cm;
  2325   CMTask*         _task;
  2326   bool            _is_serial;
  2327  public:
  2328   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
  2329     _cm(cm), _task(task), _is_serial(is_serial) {
  2330     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  2333   void do_void() {
  2334     do {
  2335       if (_cm->verbose_high()) {
  2336         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
  2337                                _task->worker_id(), BOOL_TO_STR(_is_serial));
  2340       // We call CMTask::do_marking_step() to completely drain the local
  2341       // and global marking stacks of entries pushed by the 'keep alive'
  2342       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
  2343       //
  2344       // CMTask::do_marking_step() is called in a loop, which we'll exit
  2345       // if there's nothing more to do (i.e. we've completely drained the
  2346       // entries that were pushed as a result of applying the 'keep alive'
  2347       // closure to the entries on the discovered ref lists) or we overflow
  2348       // the global marking stack.
  2349       //
  2350       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
  2351       // flag while there may still be some work to do. (See the comment at
  2352       // the beginning of CMTask::do_marking_step() for those conditions -
  2353       // one of which is reaching the specified time target.) It is only
  2354       // when CMTask::do_marking_step() returns without setting the
  2355       // has_aborted() flag that the marking step has completed.
  2357       _task->do_marking_step(1000000000.0 /* something very large */,
  2358                              true         /* do_termination */,
  2359                              _is_serial);
  2360     } while (_task->has_aborted() && !_cm->has_overflown());
  2362 };
  2364 // Implementation of AbstractRefProcTaskExecutor for parallel
  2365 // reference processing at the end of G1 concurrent marking
  2367 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  2368 private:
  2369   G1CollectedHeap* _g1h;
  2370   ConcurrentMark*  _cm;
  2371   WorkGang*        _workers;
  2372   int              _active_workers;
  2374 public:
  2375   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
  2376                         ConcurrentMark* cm,
  2377                         WorkGang* workers,
  2378                         int n_workers) :
  2379     _g1h(g1h), _cm(cm),
  2380     _workers(workers), _active_workers(n_workers) { }
  2382   // Executes the given task using concurrent marking worker threads.
  2383   virtual void execute(ProcessTask& task);
  2384   virtual void execute(EnqueueTask& task);
  2385 };
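       // Proxy gang task that runs a reference-processing ProcessTask on the
       // G1 work gang; each worker processes references using its own
       // CMTask-based keep-alive and drain closures defined above.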
  2387 class G1CMRefProcTaskProxy: public AbstractGangTask {
  2388   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  2389   ProcessTask&     _proc_task;
  2390   G1CollectedHeap* _g1h;
  2391   ConcurrentMark*  _cm;
  2393 public:
  2394   G1CMRefProcTaskProxy(ProcessTask& proc_task,
  2395                      G1CollectedHeap* g1h,
  2396                      ConcurrentMark* cm) :
  2397     AbstractGangTask("Process reference objects in parallel"),
  2398     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
  2399     ReferenceProcessor* rp = _g1h->ref_processor_cm();
  2400     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  2403   virtual void work(uint worker_id) {
  2404     ResourceMark rm;
  2405     HandleMark hm;
  2406     CMTask* task = _cm->task(worker_id);
  2407     G1CMIsAliveClosure g1_is_alive(_g1h);
  2408     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
  2409     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
  2411     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  2413 };
  2415 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  2416   assert(_workers != NULL, "Need parallel worker threads.");
  2417   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  2419   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
  2421   // We need to reset the concurrency level before each
  2422   // proxy task execution, so that the termination protocol
  2423   // and overflow handling in CMTask::do_marking_step() know
  2424   // how many workers to wait for.
  2425   _cm->set_concurrency(_active_workers);
  2426   _g1h->set_par_threads(_active_workers);
  2427   _workers->run_task(&proc_task_proxy);
  2428   _g1h->set_par_threads(0);
  2431 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  2432   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  2433   EnqueueTask& _enq_task;
  2435 public:
  2436   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  2437     AbstractGangTask("Enqueue reference objects in parallel"),
  2438     _enq_task(enq_task) { }
  2440   virtual void work(uint worker_id) {
  2441     _enq_task.work(worker_id);
  2443 };
  2445 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  2446   assert(_workers != NULL, "Need parallel worker threads.");
  2447   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  2449   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
  2451   // Not strictly necessary but...
  2452   //
  2453   // We need to reset the concurrency level before each
  2454   // proxy task execution, so that the termination protocol
  2455   // and overflow handling in CMTask::do_marking_step() know
  2456   // how many workers to wait for.
  2457   _cm->set_concurrency(_active_workers);
  2458   _g1h->set_par_threads(_active_workers);
  2459   _workers->run_task(&enq_task_proxy);
  2460   _g1h->set_par_threads(0);
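       // Delegates the parallel part of the weak-reference work to
       // G1CollectedHeap::parallel_cleaning(); the boolean arguments request
       // string table and symbol table cleaning, and purged_classes tells the
       // cleaning code whether class unloading actually occurred.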
  2463 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
  2464   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
  2467 // Helper class to get rid of some boilerplate code.
  2468 class G1RemarkGCTraceTime : public GCTraceTime {
  2469   static bool doit_and_prepend(bool doit) {
  2470     if (doit) {
  2471       gclog_or_tty->put(' ');
  2473     return doit;
  2476  public:
  2477   G1RemarkGCTraceTime(const char* title, bool doit)
  2478     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
  2479         G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
  2481 };
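       // Weak reference processing for the remark pause: process the discovered
       // references (serially, or via the parallel executor above), enqueue the
       // survivors, and - unless the marking stack overflowed - unload classes
       // and clean up the string and symbol tables.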
  2483 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  2484   if (has_overflown()) {
  2485     // Skip processing the discovered references if we have
  2486     // overflown the global marking stack. Reference objects
  2487     // only get discovered once so it is OK to not
  2488     // de-populate the discovered reference lists. We could have,
  2489     // but the only benefit would be that, when marking restarts,
  2490     // fewer reference objects are discovered.
  2491     return;
  2494   ResourceMark rm;
  2495   HandleMark   hm;
  2497   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  2499   // Is alive closure.
  2500   G1CMIsAliveClosure g1_is_alive(g1h);
  2502   // Inner scope to exclude the cleaning of the string and symbol
  2503   // tables from the displayed time.
  2505     if (G1Log::finer()) {
  2506       gclog_or_tty->put(' ');
  2508     GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
  2510     ReferenceProcessor* rp = g1h->ref_processor_cm();
  2512     // See the comment in G1CollectedHeap::ref_processing_init()
  2513     // about how reference processing currently works in G1.
  2515     // Set the soft reference policy
  2516     rp->setup_policy(clear_all_soft_refs);
  2517     assert(_markStack.isEmpty(), "mark stack should be empty");
  2519     // Instances of the 'Keep Alive' and 'Complete GC' closures used
  2520     // in serial reference processing. Note these closures are also
  2521     // used for serially processing (by the current thread) the
  2522     // JNI references during parallel reference processing.
  2523     //
  2524     // These closures do not need to synchronize with the worker
  2525     // threads involved in parallel reference processing as these
  2526     // instances are executed serially by the current thread (e.g.
  2527     // reference processing is not multi-threaded and is thus
  2528     // performed by the current thread instead of a gang worker).
  2529     //
  2530     // The gang tasks involved in parallel reference processing create
  2531     // their own instances of these closures, which do their own
  2532     // synchronization among themselves.
  2533     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
  2534     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
  2536     // We need at least one active thread. If reference processing
  2537     // is not multi-threaded we use the current (VMThread) thread,
  2538     // otherwise we use the work gang from the G1CollectedHeap and
  2539     // we utilize all the worker threads we can.
  2540     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
  2541     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
  2542     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
  2544     // Parallel processing task executor.
  2545     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
  2546                                               g1h->workers(), active_workers);
  2547     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
  2549     // Set the concurrency level. The phase was already set prior to
  2550     // executing the remark task.
  2551     set_concurrency(active_workers);
  2553     // Set the degree of MT processing here.  If the discovery was done MT,
  2554     // the number of threads involved during discovery could differ from
  2555     // the number of active workers.  This is OK as long as the discovered
  2556     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
  2557     rp->set_active_mt_degree(active_workers);
  2559     // Process the weak references.
  2560     const ReferenceProcessorStats& stats =
  2561         rp->process_discovered_references(&g1_is_alive,
  2562                                           &g1_keep_alive,
  2563                                           &g1_drain_mark_stack,
  2564                                           executor,
  2565                                           g1h->gc_timer_cm(),
  2566                                           concurrent_gc_id());
  2567     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
  2569     // The do_oop work routines of the keep_alive and drain_marking_stack
  2570     // oop closures will set the has_overflown flag if we overflow the
  2571     // global marking stack.
  2573     assert(_markStack.overflow() || _markStack.isEmpty(),
  2574             "mark stack should be empty (unless it overflowed)");
  2576     if (_markStack.overflow()) {
  2577       // This should have been done already when we tried to push an
  2578       // entry on to the global mark stack. But let's do it again.
  2579       set_has_overflown();
  2582     assert(rp->num_q() == active_workers, "why not");
  2584     rp->enqueue_discovered_references(executor);
  2586     rp->verify_no_references_recorded();
  2587     assert(!rp->discovery_enabled(), "Post condition");
  2590   if (has_overflown()) {
  2591     // We cannot trust g1_is_alive if the marking stack overflowed.
  2592     return;
  2595   assert(_markStack.isEmpty(), "Marking should have completed");
  2597   // Unload Klasses, String, Symbols, Code Cache, etc.
  2599     G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
  2601     if (ClassUnloadingWithConcurrentMark) {
  2602       bool purged_classes;
  2605         G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
  2606         purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
  2610         G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
  2611         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
  2615     if (G1StringDedup::is_enabled()) {
  2616       G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
  2617       G1StringDedup::unlink(&g1_is_alive);
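       // After a successful marking cycle the "next" bitmap holds the complete
       // marking information. Swapping the two pointers installs it as the
       // "prev" bitmap read by the rest of the collector and recycles the old
       // "prev" bitmap for the next cycle.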
  2622 void ConcurrentMark::swapMarkBitMaps() {
  2623   CMBitMapRO* temp = _prevMarkBitMap;
  2624   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  2625   _nextMarkBitMap  = (CMBitMap*)  temp;
  2628 class CMObjectClosure;
  2630 // Closure for iterating over objects, currently only used for
  2631 // processing SATB buffers.
  2632 class CMObjectClosure : public ObjectClosure {
  2633 private:
  2634   CMTask* _task;
  2636 public:
  2637   void do_object(oop obj) {
  2638     _task->deal_with_reference(obj);
  2641   CMObjectClosure(CMTask* task) : _task(task) { }
  2642 };
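       // Thread closure used during remark: for each Java thread it walks the
       // thread's nmethods (see the comment in do_thread() below for why) and
       // drains the thread's SATB buffer; for the VM thread it drains the
       // shared SATB queue.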
  2644 class G1RemarkThreadsClosure : public ThreadClosure {
  2645   CMObjectClosure _cm_obj;
  2646   G1CMOopClosure _cm_cl;
  2647   MarkingCodeBlobClosure _code_cl;
  2648   int _thread_parity;
  2649   bool _is_par;
  2651  public:
  2652   G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
  2653     _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
  2654     _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
  2656   void do_thread(Thread* thread) {
  2657     if (thread->is_Java_thread()) {
  2658       if (thread->claim_oops_do(_is_par, _thread_parity)) {
  2659         JavaThread* jt = (JavaThread*)thread;
  2661         // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
  2662         // however, oops reachable from nmethods have very complex lifecycles:
  2663         // * Alive if on the stack of an executing method
  2664         // * Weakly reachable otherwise
  2665         // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
  2666         // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
  2667         jt->nmethods_do(&_code_cl);
  2669         jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
  2671     } else if (thread->is_VM_thread()) {
  2672       if (thread->claim_oops_do(_is_par, _thread_parity)) {
  2673         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
  2677 };
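       // Gang task for the remark work: each active worker first scans the
       // threads via G1RemarkThreadsClosure and then calls do_marking_step() in
       // a loop until marking completes or the global mark stack overflows.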
  2679 class CMRemarkTask: public AbstractGangTask {
  2680 private:
  2681   ConcurrentMark* _cm;
  2682   bool            _is_serial;
  2683 public:
  2684   void work(uint worker_id) {
  2685     // Since all available tasks are actually started, we should
  2686     // only proceed if we're supposed to be active.
  2687     if (worker_id < _cm->active_tasks()) {
  2688       CMTask* task = _cm->task(worker_id);
  2689       task->record_start_time();
  2691         ResourceMark rm;
  2692         HandleMark hm;
  2694         G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
  2695         Threads::threads_do(&threads_f);
  2698       do {
  2699         task->do_marking_step(1000000000.0 /* something very large */,
  2700                               true         /* do_termination       */,
  2701                               _is_serial);
  2702       } while (task->has_aborted() && !_cm->has_overflown());
  2703       // If we overflow, then we do not want to restart. We instead
  2704       // want to abort remark and do concurrent marking again.
  2705       task->record_end_time();
  2709   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
  2710     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
  2711     _cm->terminator()->reset_for_reuse(active_workers);
  2713 };
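       // Runs the remark work, on the work gang when parallel GC threads are
       // enabled and serially on the VM thread otherwise, then checks the
       // invariant that all SATB buffers have been processed unless marking
       // overflowed.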
  2715 void ConcurrentMark::checkpointRootsFinalWork() {
  2716   ResourceMark rm;
  2717   HandleMark   hm;
  2718   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  2720   G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
  2722   g1h->ensure_parsability(false);
  2724   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2725     G1CollectedHeap::StrongRootsScope srs(g1h);
  2726     // this is remark, so we'll use up all active threads
  2727     uint active_workers = g1h->workers()->active_workers();
  2728     if (active_workers == 0) {
  2729       assert(active_workers > 0, "Should have been set earlier");
  2730       active_workers = (uint) ParallelGCThreads;
  2731       g1h->workers()->set_active_workers(active_workers);
  2733     set_concurrency_and_phase(active_workers, false /* concurrent */);
  2734     // Leave _parallel_marking_threads at its
  2735     // value originally calculated in the ConcurrentMark
  2736     // constructor and pass values of the active workers
  2737     // through the gang in the task.
  2739     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
  2740     // We will start all available threads, even if we decide that the
  2741     // active_workers will be fewer. The extra ones will just bail out
  2742     // immediately.
  2743     g1h->set_par_threads(active_workers);
  2744     g1h->workers()->run_task(&remarkTask);
  2745     g1h->set_par_threads(0);
  2746   } else {
  2747     G1CollectedHeap::StrongRootsScope srs(g1h);
  2748     uint active_workers = 1;
  2749     set_concurrency_and_phase(active_workers, false /* concurrent */);
  2751     // Note - if there's no work gang then the VMThread will be
  2752     // the thread to execute the remark - serially. We have
  2753     // to pass true for the is_serial parameter so that
  2754     // CMTask::do_marking_step() doesn't enter the sync
  2755     // barriers in the event of an overflow. Doing so will
  2756     // cause an assert that the current thread is not a
  2757     // concurrent GC thread.
  2758     CMRemarkTask remarkTask(this, active_workers, true /* is_serial */);
  2759     remarkTask.work(0);
  2761   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  2762   guarantee(has_overflown() ||
  2763             satb_mq_set.completed_buffers_num() == 0,
  2764             err_msg("Invariant: has_overflown = %s, num buffers = %d",
  2765                     BOOL_TO_STR(has_overflown()),
  2766                     satb_mq_set.completed_buffers_num()));
  2768   print_stats();
  2771 #ifndef PRODUCT
  2773 class PrintReachableOopClosure: public OopClosure {
  2774 private:
  2775   G1CollectedHeap* _g1h;
  2776   outputStream*    _out;
  2777   VerifyOption     _vo;
  2778   bool             _all;
  2780 public:
  2781   PrintReachableOopClosure(outputStream* out,
  2782                            VerifyOption  vo,
  2783                            bool          all) :
  2784     _g1h(G1CollectedHeap::heap()),
  2785     _out(out), _vo(vo), _all(all) { }
  2787   void do_oop(narrowOop* p) { do_oop_work(p); }
  2788   void do_oop(      oop* p) { do_oop_work(p); }
  2790   template <class T> void do_oop_work(T* p) {
  2791     oop         obj = oopDesc::load_decode_heap_oop(p);
  2792     const char* str = NULL;
  2793     const char* str2 = "";
  2795     if (obj == NULL) {
  2796       str = "";
  2797     } else if (!_g1h->is_in_g1_reserved(obj)) {
  2798       str = " O";
  2799     } else {
  2800       HeapRegion* hr  = _g1h->heap_region_containing(obj);
  2801       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
  2802       bool marked = _g1h->is_marked(obj, _vo);
  2804       if (over_tams) {
  2805         str = " >";
  2806         if (marked) {
  2807           str2 = " AND MARKED";
  2809       } else if (marked) {
  2810         str = " M";
  2811       } else {
  2812         str = " NOT";
  2816     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
  2817                    p2i(p), p2i((void*) obj), str, str2);
  2819 };
  2821 class PrintReachableObjectClosure : public ObjectClosure {
  2822 private:
  2823   G1CollectedHeap* _g1h;
  2824   outputStream*    _out;
  2825   VerifyOption     _vo;
  2826   bool             _all;
  2827   HeapRegion*      _hr;
  2829 public:
  2830   PrintReachableObjectClosure(outputStream* out,
  2831                               VerifyOption  vo,
  2832                               bool          all,
  2833                               HeapRegion*   hr) :
  2834     _g1h(G1CollectedHeap::heap()),
  2835     _out(out), _vo(vo), _all(all), _hr(hr) { }
  2837   void do_object(oop o) {
  2838     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
  2839     bool marked = _g1h->is_marked(o, _vo);
  2840     bool print_it = _all || over_tams || marked;
  2842     if (print_it) {
  2843       _out->print_cr(" "PTR_FORMAT"%s",
  2844                      p2i((void *)o), (over_tams) ? " >" : (marked) ? " M" : "");
  2845       PrintReachableOopClosure oopCl(_out, _vo, _all);
  2846       o->oop_iterate_no_header(&oopCl);
  2849 };
  2851 class PrintReachableRegionClosure : public HeapRegionClosure {
  2852 private:
  2853   G1CollectedHeap* _g1h;
  2854   outputStream*    _out;
  2855   VerifyOption     _vo;
  2856   bool             _all;
  2858 public:
  2859   bool doHeapRegion(HeapRegion* hr) {
  2860     HeapWord* b = hr->bottom();
  2861     HeapWord* e = hr->end();
  2862     HeapWord* t = hr->top();
  2863     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
  2864     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
  2865                    "TAMS: " PTR_FORMAT, p2i(b), p2i(e), p2i(t), p2i(p));
  2866     _out->cr();
  2868     HeapWord* from = b;
  2869     HeapWord* to   = t;
  2871     if (to > from) {
  2872       _out->print_cr("Objects in [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(from), p2i(to));
  2873       _out->cr();
  2874       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
  2875       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
  2876       _out->cr();
  2879     return false;
  2882   PrintReachableRegionClosure(outputStream* out,
  2883                               VerifyOption  vo,
  2884                               bool          all) :
  2885     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
  2886 };
  2888 void ConcurrentMark::print_reachable(const char* str,
  2889                                      VerifyOption vo,
  2890                                      bool all) {
  2891   gclog_or_tty->cr();
  2892   gclog_or_tty->print_cr("== Doing heap dump... ");
  2894   if (G1PrintReachableBaseFile == NULL) {
  2895     gclog_or_tty->print_cr("  #### error: no base file defined");
  2896     return;
  2899   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
  2900       (JVM_MAXPATHLEN - 1)) {
  2901     gclog_or_tty->print_cr("  #### error: file name too long");
  2902     return;
  2905   char file_name[JVM_MAXPATHLEN];
  2906   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
  2907   gclog_or_tty->print_cr("  dumping to file %s", file_name);
  2909   fileStream fout(file_name);
  2910   if (!fout.is_open()) {
  2911     gclog_or_tty->print_cr("  #### error: could not open file");
  2912     return;
  2915   outputStream* out = &fout;
  2916   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
  2917   out->cr();
  2919   out->print_cr("--- ITERATING OVER REGIONS");
  2920   out->cr();
  2921   PrintReachableRegionClosure rcl(out, vo, all);
  2922   _g1h->heap_region_iterate(&rcl);
  2923   out->cr();
  2925   gclog_or_tty->print_cr("  done");
  2926   gclog_or_tty->flush();
  2929 #endif // PRODUCT
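// Usage note (illustrative, with assumptions): print_reachable() only
// exists in non-product builds and is gated on G1PrintReachableBaseFile
// being set; the dump for a phase named <str> is written to the file
// "<base>.<str>" (see the sprintf above). Assuming the flag is
// accessible in a given debug build (depending on the flag's kind,
// -XX:+UnlockDiagnosticVMOptions or a debug VM may be required), an
// invocation might look like:
//
//   java -XX:G1PrintReachableBaseFile=/tmp/g1reach ...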
  2931 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  2932   // Note we are overriding the read-only view of the prev map here, via
  2933   // the cast.
  2934   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
  2937 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  2938   _nextMarkBitMap->clearRange(mr);
  2941 HeapRegion*
  2942 ConcurrentMark::claim_region(uint worker_id) {
  2943   // "checkpoint" the finger
  2944   HeapWord* finger = _finger;
  2946   // _heap_end will not change underneath our feet; it only changes at
  2947   // yield points.
  2948   while (finger < _heap_end) {
  2949     assert(_g1h->is_in_g1_reserved(finger), "invariant");
  2951     // Note on how this code handles humongous regions. In the
  2952     // normal case the finger will reach the start of a "starts
  2953     // humongous" (SH) region. Its end will either be the end of the
  2954     // last "continues humongous" (CH) region in the sequence, or the
  2955     // standard end of the SH region (if the SH is the only region in
  2956     // the sequence). That way claim_region() will skip over the CH
  2957     // regions. However, there is a subtle race between a CM thread
  2958     // executing this method and a mutator thread doing a humongous
  2959     // object allocation. The two are not mutually exclusive as the CM
  2960     // thread does not need to hold the Heap_lock when it gets
  2961     // here. So there is a chance that claim_region() will come across
  2962     // a free region that's in the process of becoming a SH or a CH
  2963     // region. In the former case, it will either
  2964     //   a) Miss the update to the region's end, in which case it will
  2965     //      visit every subsequent CH region, will find their bitmaps
  2966     //      empty, and do nothing, or
  2967     //   b) Observe the update to the region's end, in which case it
  2968     //      will skip the subsequent CH regions.
  2969     // If it comes across a region that suddenly becomes CH, the
  2970     // scenario will be similar to b). So, the race between
  2971     // claim_region() and a humongous object allocation might force us
  2972     // to do a bit of unnecessary work (due to some unnecessary bitmap
  2973     // iterations) but it should not introduce any correctness issues.
  2974     HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
  2976     // heap_region_containing_raw() above may return NULL, as we always
  2977     // claim regions up to the end of the heap. In this case, just jump to the next region.
  2978     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
  2980     // Is the gap between reading the finger and doing the CAS too long?
  2981     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
  2982     if (res == finger && curr_region != NULL) {
  2983       // we succeeded
  2984       HeapWord*   bottom        = curr_region->bottom();
  2985       HeapWord*   limit         = curr_region->next_top_at_mark_start();
  2987       if (verbose_low()) {
  2988         gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
  2989                                "["PTR_FORMAT", "PTR_FORMAT"), "
  2990                                "limit = "PTR_FORMAT,
  2991                                worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
  2994       // notice that _finger == end cannot be guaranteed here since
  2995       // someone else might have moved the finger even further
  2996       assert(_finger >= end, "the finger should have moved forward");
  2998       if (verbose_low()) {
  2999         gclog_or_tty->print_cr("[%u] we were successful with region = "
  3000                                PTR_FORMAT, worker_id, p2i(curr_region));
  3003       if (limit > bottom) {
  3004         if (verbose_low()) {
  3005           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
  3006                                  "returning it ", worker_id, p2i(curr_region));
  3008         return curr_region;
  3009       } else {
  3010         assert(limit == bottom,
  3011                "the region limit should be at bottom");
  3012         if (verbose_low()) {
  3013           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
  3014                                  "returning NULL", worker_id, p2i(curr_region));
  3016         // we return NULL and the caller should try calling
  3017         // claim_region() again.
  3018         return NULL;
  3020     } else {
  3021       assert(_finger > finger, "the finger should have moved forward");
  3022       if (verbose_low()) {
  3023         if (curr_region == NULL) {
  3024           gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
  3025                                  "global finger = "PTR_FORMAT", "
  3026                                  "our finger = "PTR_FORMAT,
  3027                                  worker_id, p2i(_finger), p2i(finger));
  3028         } else {
  3029           gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
  3030                                  "global finger = "PTR_FORMAT", "
  3031                                  "our finger = "PTR_FORMAT,
  3032                                  worker_id, p2i(_finger), p2i(finger));
  3036       // read it again
  3037       finger = _finger;
  3041   return NULL;
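// Illustrative sketch (not product code) of the lock-free claim
// protocol implemented above: snapshot the shared finger, compute the
// end of the claimed range, and try to advance the finger with a CAS;
// on failure, re-read the finger and retry:
//
//   HeapWord* f = _finger;                          // snapshot
//   HeapWord* end = ...;                            // end of region at f
//   if (Atomic::cmpxchg_ptr(end, &_finger, f) == f) {
//     // success: this worker owns [f, end)
//   } else {
//     f = _finger;                                  // somebody else won; retry
//   }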
  3044 #ifndef PRODUCT
  3045 enum VerifyNoCSetOopsPhase {
  3046   VerifyNoCSetOopsStack,
  3047   VerifyNoCSetOopsQueues,
  3048   VerifyNoCSetOopsSATBCompleted,
  3049   VerifyNoCSetOopsSATBThread
  3050 };
  3052 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
  3053 private:
  3054   G1CollectedHeap* _g1h;
  3055   VerifyNoCSetOopsPhase _phase;
  3056   int _info;
  3058   const char* phase_str() {
  3059     switch (_phase) {
  3060     case VerifyNoCSetOopsStack:         return "Stack";
  3061     case VerifyNoCSetOopsQueues:        return "Queue";
  3062     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
  3063     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
  3064     default:                            ShouldNotReachHere();
  3066     return NULL;
  3069   void do_object_work(oop obj) {
  3070     guarantee(!_g1h->obj_in_cs(obj),
  3071               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
  3072                       p2i((void*) obj), phase_str(), _info));
  3075 public:
  3076   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
  3078   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
  3079     _phase = phase;
  3080     _info = info;
  3083   virtual void do_oop(oop* p) {
  3084     oop obj = oopDesc::load_decode_heap_oop(p);
  3085     do_object_work(obj);
  3088   virtual void do_oop(narrowOop* p) {
  3089     // We should not come across narrow oops while scanning marking
  3090     // stacks and SATB buffers.
  3091     ShouldNotReachHere();
  3094   virtual void do_object(oop obj) {
  3095     do_object_work(obj);
  3097 };
  3099 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
  3100                                          bool verify_enqueued_buffers,
  3101                                          bool verify_thread_buffers,
  3102                                          bool verify_fingers) {
  3103   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  3104   if (!G1CollectedHeap::heap()->mark_in_progress()) {
  3105     return;
  3108   VerifyNoCSetOopsClosure cl;
  3110   if (verify_stacks) {
  3111     // Verify entries on the global mark stack
  3112     cl.set_phase(VerifyNoCSetOopsStack);
  3113     _markStack.oops_do(&cl);
  3115     // Verify entries on the task queues
  3116     for (uint i = 0; i < _max_worker_id; i += 1) {
  3117       cl.set_phase(VerifyNoCSetOopsQueues, i);
  3118       CMTaskQueue* queue = _task_queues->queue(i);
  3119       queue->oops_do(&cl);
  3123   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  3125   // Verify entries on the enqueued SATB buffers
  3126   if (verify_enqueued_buffers) {
  3127     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
  3128     satb_qs.iterate_completed_buffers_read_only(&cl);
  3131   // Verify entries on the per-thread SATB buffers
  3132   if (verify_thread_buffers) {
  3133     cl.set_phase(VerifyNoCSetOopsSATBThread);
  3134     satb_qs.iterate_thread_buffers_read_only(&cl);
  3137   if (verify_fingers) {
  3138     // Verify the global finger
  3139     HeapWord* global_finger = finger();
  3140     if (global_finger != NULL && global_finger < _heap_end) {
  3141       // The global finger always points to a heap region boundary. We
  3142       // use heap_region_containing_raw() to get the containing region
  3143       // given that the global finger could be pointing to a free region
  3144       // which subsequently becomes continues humongous. If that
  3145       // happens, heap_region_containing() will return the bottom of the
  3146       // corresponding starts humongous region and the check below will
  3147       // not hold any more.
  3148       // Since we always iterate over all regions, we might get a NULL HeapRegion
  3149       // here.
  3150       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
  3151       guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
  3152                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
  3153                         p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
  3156     // Verify the task fingers
  3157     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  3158     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
  3159       CMTask* task = _tasks[i];
  3160       HeapWord* task_finger = task->finger();
  3161       if (task_finger != NULL && task_finger < _heap_end) {
  3162         // See above note on the global finger verification.
  3163         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
  3164         guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
  3165                   !task_hr->in_collection_set(),
  3166                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
  3167                           p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
  3172 #endif // PRODUCT
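// Usage sketch (hypothetical caller, debug builds only): a caller at a
// safepoint can request any subset of the four checks; asking for all
// of them might look like:
//
//   _cm->verify_no_cset_oops(true  /* verify_stacks */,
//                            true  /* verify_enqueued_buffers */,
//                            true  /* verify_thread_buffers */,
//                            true  /* verify_fingers */);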
  3174 // Aggregate the counting data that was constructed concurrently
  3175 // with marking.
  3176 class AggregateCountDataHRClosure: public HeapRegionClosure {
  3177   G1CollectedHeap* _g1h;
  3178   ConcurrentMark* _cm;
  3179   CardTableModRefBS* _ct_bs;
  3180   BitMap* _cm_card_bm;
  3181   uint _max_worker_id;
  3183  public:
  3184   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
  3185                               BitMap* cm_card_bm,
  3186                               uint max_worker_id) :
  3187     _g1h(g1h), _cm(g1h->concurrent_mark()),
  3188     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
  3189     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
  3191   bool doHeapRegion(HeapRegion* hr) {
  3192     if (hr->continuesHumongous()) {
  3193       // We will ignore these here and process them when their
  3194       // associated "starts humongous" region is processed.
  3195       // Note that we cannot rely on their associated
  3196       // "starts humongous" region to have its bit set to 1
  3197       // since, due to the region chunking in the parallel region
  3198       // iteration, a "continues humongous" region might be visited
  3199       // before its associated "starts humongous".
  3200       return false;
  3203     HeapWord* start = hr->bottom();
  3204     HeapWord* limit = hr->next_top_at_mark_start();
  3205     HeapWord* end = hr->end();
  3207     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
  3208            err_msg("Preconditions not met - "
  3209                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
  3210                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
  3211                    p2i(start), p2i(limit), p2i(hr->top()), p2i(hr->end())));
  3213     assert(hr->next_marked_bytes() == 0, "Precondition");
  3215     if (start == limit) {
  3216       // NTAMS of this region has not been set so nothing to do.
  3217       return false;
  3220     // 'start' should be in the heap.
  3221     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
  3222     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
  3223     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
  3225     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
  3226     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
  3227     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
  3229     // If ntams is not card aligned then we bump the card bitmap index
  3230     // for limit so that we get all the cards spanned by
  3231     // the object ending at ntams.
  3232     // Note: if this is the last region in the heap then ntams
  3233     // could actually be just beyond the end of the heap;
  3234     // limit_idx will then correspond to a (non-existent) card
  3235     // that is also outside the heap.
  3236     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
  3237       limit_idx += 1;
  3240     assert(limit_idx <= end_idx, "or else use atomics");
  3242     // Aggregate the "stripe" in the count data associated with hr.
  3243     uint hrm_index = hr->hrm_index();
  3244     size_t marked_bytes = 0;
  3246     for (uint i = 0; i < _max_worker_id; i += 1) {
  3247       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
  3248       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
  3250       // Fetch the marked_bytes in this region for task i and
  3251       // add it to the running total for this region.
  3252       marked_bytes += marked_bytes_array[hrm_index];
  3254       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
  3255       // into the global card bitmap.
  3256       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
  3258       while (scan_idx < limit_idx) {
  3259         assert(task_card_bm->at(scan_idx) == true, "should be");
  3260         _cm_card_bm->set_bit(scan_idx);
  3261         assert(_cm_card_bm->at(scan_idx) == true, "should be");
  3263         // BitMap::get_next_one_offset() can handle the case when
  3264         // its left_offset parameter is greater than its right_offset
  3265         // parameter. It does, however, have an early exit if
  3266         // left_offset == right_offset. So let's limit the value
  3267         // passed in for left offset here.
  3268         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
  3269         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
  3273     // Update the marked bytes for this region.
  3274     hr->add_to_marked_bytes(marked_bytes);
  3276     // Next heap region
  3277     return false;
  3279 };
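// Illustrative sketch (not product code) of the set-bit iteration
// idiom used in doHeapRegion() above; the MIN2 clamp caps the left
// offset at the right one so that the cheap early-exit path of
// BitMap::get_next_one_offset() is taken on the final step:
//
//   for (BitMap::idx_t i = bm->get_next_one_offset(start, limit);
//        i < limit;
//        i = bm->get_next_one_offset(MIN2(i + 1, limit), limit)) {
//     // bit i is set within [start, limit)
//   }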
  3281 class G1AggregateCountDataTask: public AbstractGangTask {
  3282 protected:
  3283   G1CollectedHeap* _g1h;
  3284   ConcurrentMark* _cm;
  3285   BitMap* _cm_card_bm;
  3286   uint _max_worker_id;
  3287   int _active_workers;
  3289 public:
  3290   G1AggregateCountDataTask(G1CollectedHeap* g1h,
  3291                            ConcurrentMark* cm,
  3292                            BitMap* cm_card_bm,
  3293                            uint max_worker_id,
  3294                            int n_workers) :
  3295     AbstractGangTask("Count Aggregation"),
  3296     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
  3297     _max_worker_id(max_worker_id),
  3298     _active_workers(n_workers) { }
  3300   void work(uint worker_id) {
  3301     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
  3303     if (G1CollectedHeap::use_parallel_gc_threads()) {
  3304       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
  3305                                             _active_workers,
  3306                                             HeapRegion::AggregateCountClaimValue);
  3307     } else {
  3308       _g1h->heap_region_iterate(&cl);
  3311 };
  3314 void ConcurrentMark::aggregate_count_data() {
  3315   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  3316                         _g1h->workers()->active_workers() :
  3317                         1);
  3319   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
  3320                                            _max_worker_id, n_workers);
  3322   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3323     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3324            "sanity check");
  3325     _g1h->set_par_threads(n_workers);
  3326     _g1h->workers()->run_task(&g1_par_agg_task);
  3327     _g1h->set_par_threads(0);
  3329     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
  3330            "sanity check");
  3331     _g1h->reset_heap_region_claim_values();
  3332   } else {
  3333     g1_par_agg_task.work(0);
  3335   _g1h->allocation_context_stats().update_at_remark();
  3338 // Clear the per-worker arrays used to store the per-region counting data
  3339 void ConcurrentMark::clear_all_count_data() {
  3340   // Clear the global card bitmap - it will be filled during
  3341   // liveness count aggregation (during remark) and the
  3342   // final counting task.
  3343   _card_bm.clear();
  3345   // Clear the global region bitmap - it will be filled as part
  3346   // of the final counting task.
  3347   _region_bm.clear();
  3349   uint max_regions = _g1h->max_regions();
  3350   assert(_max_worker_id > 0, "uninitialized");
  3352   for (uint i = 0; i < _max_worker_id; i += 1) {
  3353     BitMap* task_card_bm = count_card_bitmap_for(i);
  3354     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
  3356     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
  3357     assert(marked_bytes_array != NULL, "uninitialized");
  3359     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
  3360     task_card_bm->clear();
  3364 void ConcurrentMark::print_stats() {
  3365   if (verbose_stats()) {
  3366     gclog_or_tty->print_cr("---------------------------------------------------------------------");
  3367     for (size_t i = 0; i < _active_tasks; ++i) {
  3368       _tasks[i]->print_stats();
  3369       gclog_or_tty->print_cr("---------------------------------------------------------------------");
  3374 // abandon current marking iteration due to a Full GC
  3375 void ConcurrentMark::abort() {
  3376   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  3377   // concurrent bitmap clearing.
  3378   _nextMarkBitMap->clearAll();
  3380   // Note we cannot clear the previous marking bitmap here
  3381   // since VerifyDuringGC verifies the objects marked during
  3382   // a full GC against the previous bitmap.
  3384   // Clear the liveness counting data
  3385   clear_all_count_data();
  3386   // Empty mark stack
  3387   reset_marking_state();
  3388   for (uint i = 0; i < _max_worker_id; ++i) {
  3389     _tasks[i]->clear_region_fields();
  3391   _first_overflow_barrier_sync.abort();
  3392   _second_overflow_barrier_sync.abort();
  3393   const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
  3394   if (!gc_id.is_undefined()) {
  3395     // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
  3396     // to detect that it was aborted. Only keep track of the first GC id that we aborted.
  3397     _aborted_gc_id = gc_id;
  3399   _has_aborted = true;
  3401   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  3402   satb_mq_set.abandon_partial_marking();
  3403   // This can be called either during or outside marking, we'll read
  3404   // the expected_active value from the SATB queue set.
  3405   satb_mq_set.set_active_all_threads(
  3406                                  false, /* new active value */
  3407                                  satb_mq_set.is_active() /* expected_active */);
  3409   _g1h->trace_heap_after_concurrent_cycle();
  3410   _g1h->register_concurrent_cycle_end();
  3413 const GCId& ConcurrentMark::concurrent_gc_id() {
  3414   if (has_aborted()) {
  3415     return _aborted_gc_id;
  3417   return _g1h->gc_tracer_cm()->gc_id();
  3420 static void print_ms_time_info(const char* prefix, const char* name,
  3421                                NumberSeq& ns) {
  3422   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
  3423                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  3424   if (ns.num() > 0) {
  3425     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
  3426                            prefix, ns.sd(), ns.maximum());
  3430 void ConcurrentMark::print_summary_info() {
  3431   gclog_or_tty->print_cr(" Concurrent marking:");
  3432   print_ms_time_info("  ", "init marks", _init_times);
  3433   print_ms_time_info("  ", "remarks", _remark_times);
  3435     print_ms_time_info("     ", "final marks", _remark_mark_times);
  3436     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  3439   print_ms_time_info("  ", "cleanups", _cleanup_times);
  3440   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
  3441                          _total_counting_time,
  3442                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
  3443                           (double)_cleanup_times.num()
  3444                          : 0.0));
  3445   if (G1ScrubRemSets) {
  3446     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
  3447                            _total_rs_scrub_time,
  3448                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
  3449                             (double)_cleanup_times.num()
  3450                            : 0.0));
  3452   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
  3453                          (_init_times.sum() + _remark_times.sum() +
  3454                           _cleanup_times.sum())/1000.0);
  3455   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
  3456                 "(%8.2f s marking).",
  3457                 cmThread()->vtime_accum(),
  3458                 cmThread()->vtime_mark_accum());
  3461 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  3462   if (use_parallel_marking_threads()) {
  3463     _parallel_workers->print_worker_threads_on(st);
  3467 void ConcurrentMark::print_on_error(outputStream* st) const {
  3468   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
  3469       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
  3470   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  3471   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
  3474 // We take a break if someone is trying to stop the world.
  3475 bool ConcurrentMark::do_yield_check(uint worker_id) {
  3476   if (SuspendibleThreadSet::should_yield()) {
  3477     if (worker_id == 0) {
  3478       _g1h->g1_policy()->record_concurrent_pause();
  3480     SuspendibleThreadSet::yield();
  3481     return true;
  3482   } else {
  3483     return false;
  3487 #ifndef PRODUCT
  3488 // for debugging purposes
  3489 void ConcurrentMark::print_finger() {
  3490   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
  3491                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
  3492   for (uint i = 0; i < _max_worker_id; ++i) {
  3493     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
  3495   gclog_or_tty->cr();
  3497 #endif
  3499 void CMTask::scan_object(oop obj) {
  3500   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
  3502   if (_cm->verbose_high()) {
  3503     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
  3504                            _worker_id, p2i((void*) obj));
  3507   size_t obj_size = obj->size();
  3508   _words_scanned += obj_size;
  3510   obj->oop_iterate(_cm_oop_closure);
  3511   statsOnly( ++_objs_scanned );
  3512   check_limits();
  3515 // Closure for iteration over bitmaps
  3516 class CMBitMapClosure : public BitMapClosure {
  3517 private:
  3518   // the bitmap that is being iterated over
  3519   CMBitMap*                   _nextMarkBitMap;
  3520   ConcurrentMark*             _cm;
  3521   CMTask*                     _task;
  3523 public:
  3524   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
  3525     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
  3527   bool do_bit(size_t offset) {
  3528     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
  3529     assert(_nextMarkBitMap->isMarked(addr), "invariant");
  3530     assert(addr < _cm->finger(), "invariant");
  3532     statsOnly( _task->increase_objs_found_on_bitmap() );
  3533     assert(addr >= _task->finger(), "invariant");
  3535     // We move that task's local finger along.
  3536     _task->move_finger_to(addr);
  3538     _task->scan_object(oop(addr));
  3539     // we only partially drain the local queue and global stack
  3540     _task->drain_local_queue(true);
  3541     _task->drain_global_stack(true);
  3543     // if the has_aborted flag has been raised, we need to bail out of
  3544     // the iteration
  3545     return !_task->has_aborted();
  3547 };
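// Illustrative sketch (not product code): a CMBitMapClosure is driven
// by iterating the next-mark bitmap over a region's unscanned range;
// returning false from do_bit() terminates the walk early when the
// task has aborted. Roughly (names illustrative):
//
//   CMBitMapClosure bitmap_closure(task, cm, next_mark_bitmap);
//   MemRegion mr(task_finger, region_limit);
//   next_mark_bitmap->iterate(&bitmap_closure, mr);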
  3549 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
  3550                                ConcurrentMark* cm,
  3551                                CMTask* task)
  3552   : _g1h(g1h), _cm(cm), _task(task) {
  3553   assert(_ref_processor == NULL, "should be initialized to NULL");
  3555   if (G1UseConcMarkReferenceProcessing) {
  3556     _ref_processor = g1h->ref_processor_cm();
  3557     assert(_ref_processor != NULL, "should not be NULL");
  3561 void CMTask::setup_for_region(HeapRegion* hr) {
  3562   assert(hr != NULL,
  3563         "claim_region() should have filtered out NULL regions");
  3564   assert(!hr->continuesHumongous(),
  3565         "claim_region() should have filtered out continues humongous regions");
  3567   if (_cm->verbose_low()) {
  3568     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
  3569                            _worker_id, p2i(hr));
  3572   _curr_region  = hr;
  3573   _finger       = hr->bottom();
  3574   update_region_limit();
  3577 void CMTask::update_region_limit() {
  3578   HeapRegion* hr            = _curr_region;
  3579   HeapWord* bottom          = hr->bottom();
  3580   HeapWord* limit           = hr->next_top_at_mark_start();
  3582   if (limit == bottom) {
  3583     if (_cm->verbose_low()) {
  3584       gclog_or_tty->print_cr("[%u] found an empty region "
  3585                              "["PTR_FORMAT", "PTR_FORMAT")",
  3586                              _worker_id, p2i(bottom), p2i(limit));
  3588     // The region was collected underneath our feet.
  3589     // We set the finger to bottom to ensure that the bitmap
  3590     // iteration that will follow this will not do anything.
  3591     // (this is not a condition that holds when we set the region up,
  3592     // as the region is not supposed to be empty in the first place)
  3593     _finger = bottom;
  3594   } else if (limit >= _region_limit) {
  3595     assert(limit >= _finger, "peace of mind");
  3596   } else {
  3597     assert(limit < _region_limit, "only way to get here");
  3598     // This can happen under some pretty unusual circumstances.  An
  3599     // evacuation pause empties the region underneath our feet (NTAMS
  3600     // at bottom). We then do some allocation in the region (NTAMS
  3601     // stays at bottom), followed by the region being used as a GC
  3602     // alloc region (NTAMS will move to top() and the objects
  3603     // originally below it will be grayed). All objects now marked in
  3604     // the region are explicitly grayed, if below the global finger,
  3605     // and in fact we do not need to scan anything else. So, we simply
  3606     // set _finger to be limit to ensure that the bitmap iteration
  3607     // doesn't do anything.
  3608     _finger = limit;
  3611   _region_limit = limit;
  3614 void CMTask::giveup_current_region() {
  3615   assert(_curr_region != NULL, "invariant");
  3616   if (_cm->verbose_low()) {
  3617     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
  3618                            _worker_id, p2i(_curr_region));
  3620   clear_region_fields();
  3623 void CMTask::clear_region_fields() {
  3624   // Values for these three fields that indicate that we're not
  3625   // holding on to a region.
  3626   _curr_region   = NULL;
  3627   _finger        = NULL;
  3628   _region_limit  = NULL;
  3631 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  3632   if (cm_oop_closure == NULL) {
  3633     assert(_cm_oop_closure != NULL, "invariant");
  3634   } else {
  3635     assert(_cm_oop_closure == NULL, "invariant");
  3637   _cm_oop_closure = cm_oop_closure;
  3640 void CMTask::reset(CMBitMap* nextMarkBitMap) {
  3641   guarantee(nextMarkBitMap != NULL, "invariant");
  3643   if (_cm->verbose_low()) {
  3644     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
  3647   _nextMarkBitMap                = nextMarkBitMap;
  3648   clear_region_fields();
  3650   _calls                         = 0;
  3651   _elapsed_time_ms               = 0.0;
  3652   _termination_time_ms           = 0.0;
  3653   _termination_start_time_ms     = 0.0;
  3655 #if _MARKING_STATS_
  3656   _local_pushes                  = 0;
  3657   _local_pops                    = 0;
  3658   _local_max_size                = 0;
  3659   _objs_scanned                  = 0;
  3660   _global_pushes                 = 0;
  3661   _global_pops                   = 0;
  3662   _global_max_size               = 0;
  3663   _global_transfers_to           = 0;
  3664   _global_transfers_from         = 0;
  3665   _regions_claimed               = 0;
  3666   _objs_found_on_bitmap          = 0;
  3667   _satb_buffers_processed        = 0;
  3668   _steal_attempts                = 0;
  3669   _steals                        = 0;
  3670   _aborted                       = 0;
  3671   _aborted_overflow              = 0;
  3672   _aborted_cm_aborted            = 0;
  3673   _aborted_yield                 = 0;
  3674   _aborted_timed_out             = 0;
  3675   _aborted_satb                  = 0;
  3676   _aborted_termination           = 0;
  3677 #endif // _MARKING_STATS_
  3680 bool CMTask::should_exit_termination() {
  3681   regular_clock_call();
  3682   // This is called when we are in the termination protocol. We should
  3683   // quit if, for some reason, this task wants to abort or the global
  3684   // stack is not empty (this means that we can get work from it).
  3685   return !_cm->mark_stack_empty() || has_aborted();
  3688 void CMTask::reached_limit() {
  3689   assert(_words_scanned >= _words_scanned_limit ||
  3690          _refs_reached >= _refs_reached_limit,
  3691          "shouldn't have been called otherwise");
  3692   regular_clock_call();
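// Illustrative sketch (not product code): the work-based trigger that
// leads here. Object scanning bumps _words_scanned and reference
// visiting bumps _refs_reached; a check of the form below (cf. the
// check_limits() call in scan_object()) fires the clock once either
// counter crosses its limit:
//
//   if (_words_scanned >= _words_scanned_limit ||
//       _refs_reached  >= _refs_reached_limit) {
//     reached_limit();
//   }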
  3695 void CMTask::regular_clock_call() {
  3696   if (has_aborted()) return;
  3698   // First, we need to recalculate the words scanned and refs reached
  3699   // limits for the next clock call.
  3700   recalculate_limits();
  3702   // During the regular clock call we do the following
  3704   // (1) If an overflow has been flagged, then we abort.
  3705   if (_cm->has_overflown()) {
  3706     set_has_aborted();
  3707     return;
  3710   // If we are not concurrent (i.e. we're doing remark) we don't need
  3711   // to check anything else. The other steps are only needed during
  3712   // the concurrent marking phase.
  3713   if (!concurrent()) return;
  3715   // (2) If marking has been aborted for Full GC, then we also abort.
  3716   if (_cm->has_aborted()) {
  3717     set_has_aborted();
  3718     statsOnly( ++_aborted_cm_aborted );
  3719     return;
  3722   double curr_time_ms = os::elapsedVTime() * 1000.0;
  3724   // (3) If marking stats are enabled, then we update the step history.
  3725 #if _MARKING_STATS_
  3726   if (_words_scanned >= _words_scanned_limit) {
  3727     ++_clock_due_to_scanning;
  3729   if (_refs_reached >= _refs_reached_limit) {
  3730     ++_clock_due_to_marking;
  3733   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  3734   _interval_start_time_ms = curr_time_ms;
  3735   _all_clock_intervals_ms.add(last_interval_ms);
  3737   if (_cm->verbose_medium()) {
  3738       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
  3739                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
  3740                         _worker_id, last_interval_ms,
  3741                         _words_scanned,
  3742                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
  3743                         _refs_reached,
  3744                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  3746 #endif // _MARKING_STATS_
  3748   // (4) We check whether we should yield. If we have to, then we abort.
  3749   if (SuspendibleThreadSet::should_yield()) {
  3750     // We should yield. To do this we abort the task. The caller is
  3751     // responsible for yielding.
  3752     set_has_aborted();
  3753     statsOnly( ++_aborted_yield );
  3754     return;
  3757   // (5) We check whether we've reached our time quota. If we have,
  3758   // then we abort.
  3759   double elapsed_time_ms = curr_time_ms - _start_time_ms;
  3760   if (elapsed_time_ms > _time_target_ms) {
  3761     set_has_aborted();
  3762     _has_timed_out = true;
  3763     statsOnly( ++_aborted_timed_out );
  3764     return;
  3767   // (6) Finally, we check whether there are enough completed SATB
  3768   // buffers available for processing. If there are, we abort.
  3769   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  3770   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
  3771     if (_cm->verbose_low()) {
  3772       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
  3773                              _worker_id);
  3775     // We do need to process SATB buffers, so we'll abort and restart
  3776     // the marking task to do so
  3777     set_has_aborted();
  3778     statsOnly( ++_aborted_satb );
  3779     return;
  3783 void CMTask::recalculate_limits() {
  3784   _real_words_scanned_limit = _words_scanned + words_scanned_period;
  3785   _words_scanned_limit      = _real_words_scanned_limit;
  3787   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
  3788   _refs_reached_limit       = _real_refs_reached_limit;
  3791 void CMTask::decrease_limits() {
  3792   // This is called when we believe that we're going to do an infrequent
  3793   // operation which will increase the per byte scanned cost (i.e. move
  3794   // entries to/from the global stack). It basically tries to decrease the
  3795   // scanning limit so that the clock is called earlier.
  3797   if (_cm->verbose_medium()) {
  3798     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  3801   _words_scanned_limit = _real_words_scanned_limit -
  3802     3 * words_scanned_period / 4;
  3803   _refs_reached_limit  = _real_refs_reached_limit -
  3804     3 * refs_reached_period / 4;
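// Worked example (illustrative): pulling each limit back by 3/4 of a
// period leaves at most 1/4 of a period of work before the clock
// fires. E.g. if words_scanned_period were 12K words (a plausible
// value, not verified here), the limit would drop by 9K words, so
// regular_clock_call() runs within the next 3K scanned words.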
  3807 void CMTask::move_entries_to_global_stack() {
  3808   // local array where we'll store the entries that will be popped
  3809   // from the local queue
  3810   oop buffer[global_stack_transfer_size];
  3812   int n = 0;
  3813   oop obj;
  3814   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
  3815     buffer[n] = obj;
  3816     ++n;
  3819   if (n > 0) {
  3820     // we popped at least one entry from the local queue
  3822     statsOnly( ++_global_transfers_to; _local_pops += n );
  3824     if (!_cm->mark_stack_push(buffer, n)) {
  3825       if (_cm->verbose_low()) {
  3826         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
  3827                                _worker_id);
  3829       set_has_aborted();
  3830     } else {
  3831       // the transfer was successful
  3833       if (_cm->verbose_medium()) {
  3834         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
  3835                                _worker_id, n);
  3837       statsOnly( int tmp_size = _cm->mark_stack_size();
  3838                  if (tmp_size > _global_max_size) {
  3839                    _global_max_size = tmp_size;
  3841                  _global_pushes += n );
  3845   // this operation was quite expensive, so decrease the limits
  3846   decrease_limits();
  3849 void CMTask::get_entries_from_global_stack() {
  3850   // local array where we'll store the entries that will be popped
  3851   // from the global stack.
  3852   oop buffer[global_stack_transfer_size];
  3853   int n;
  3854   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  3855   assert(n <= global_stack_transfer_size,
  3856          "we should not pop more than the given limit");
  3857   if (n > 0) {
  3858     // yes, we did actually pop at least one entry
  3860     statsOnly( ++_global_transfers_from; _global_pops += n );
  3861     if (_cm->verbose_medium()) {
  3862       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
  3863                              _worker_id, n);
  3865     for (int i = 0; i < n; ++i) {
  3866       bool success = _task_queue->push(buffer[i]);
  3867       // We only call this when the local queue is empty or under a
  3868       // given target limit. So, we do not expect this push to fail.
  3869       assert(success, "invariant");
  3872     statsOnly( int tmp_size = _task_queue->size();
  3873                if (tmp_size > _local_max_size) {
  3874                  _local_max_size = tmp_size;
  3876                _local_pushes += n );
  3879   // this operation was quite expensive, so decrease the limits
  3880   decrease_limits();
  3883 void CMTask::drain_local_queue(bool partially) {
  3884   if (has_aborted()) return;
  3886   // Decide what the target size is, depending on whether we're going to
  3887   // drain it partially (so that other tasks can steal if they run out
  3888   // of things to do) or totally (at the very end).
  3889   size_t target_size;
  3890   if (partially) {
  3891     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  3892   } else {
  3893     target_size = 0;
  3896   if (_task_queue->size() > target_size) {
  3897     if (_cm->verbose_high()) {
  3898       gclog_or_tty->print_cr("[%u] draining local queue, target size = " SIZE_FORMAT,
  3899                              _worker_id, target_size);
  3902     oop obj;
  3903     bool ret = _task_queue->pop_local(obj);
  3904     while (ret) {
  3905       statsOnly( ++_local_pops );
  3907       if (_cm->verbose_high()) {
  3908         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
  3909                                p2i((void*) obj));
  3912       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
  3913       assert(!_g1h->is_on_master_free_list(
  3914                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
  3916       scan_object(obj);
  3918       if (_task_queue->size() <= target_size || has_aborted()) {
  3919         ret = false;
  3920       } else {
  3921         ret = _task_queue->pop_local(obj);
  3925     if (_cm->verbose_high()) {
  3926       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
  3927                              _worker_id, _task_queue->size());
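// Illustrative note: for a partial drain the target is
// MIN2(max_elems/3, GCDrainStackTargetSize), so with, say, a 16K-entry
// task queue and an assumed GCDrainStackTargetSize of 64 the loop pops
// entries until only 64 remain, leaving work available for stealing;
// a total drain (target 0) empties the queue completely.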
  3932 void CMTask::drain_global_stack(bool partially) {
  3933   if (has_aborted()) return;
  3935   // We have a policy to drain the local queue before we attempt to
  3936   // drain the global stack.
  3937   assert(partially || _task_queue->size() == 0, "invariant");
  3939   // Decide what the target size is, depending on whether we're going to
  3940   // drain it partially (so that other tasks can steal if they run out
  3941   // of things to do) or totally (at the very end).  Notice that,
  3942   // because we move entries from the global stack in chunks or
  3943   // because another task might be doing the same, we might in fact
  3944   // drop below the target. But, this is not a problem.
  3945   size_t target_size;
  3946   if (partially) {
  3947     target_size = _cm->partial_mark_stack_size_target();
  3948   } else {
  3949     target_size = 0;
  3952   if (_cm->mark_stack_size() > target_size) {
  3953     if (_cm->verbose_low()) {
  3954       gclog_or_tty->print_cr("[%u] draining global_stack, target size " SIZE_FORMAT,
  3955                              _worker_id, target_size);
  3958     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
  3959       get_entries_from_global_stack();
  3960       drain_local_queue(partially);
  3963     if (_cm->verbose_low()) {
  3964       gclog_or_tty->print_cr("[%u] drained global stack, size = " SIZE_FORMAT,
  3965                              _worker_id, _cm->mark_stack_size());
  3970 // The SATB queue set makes several assumptions about whether to call the par
  3971 // or non-par versions of its methods. This is why some of the code is
  3972 // replicated. We should really get rid of the single-threaded version
  3973 // of the code to simplify things.
  3974 void CMTask::drain_satb_buffers() {
  3975   if (has_aborted()) return;
  3977   // We set this so that the regular clock knows that we're in the
  3978   // middle of draining buffers and doesn't set the abort flag when it
  3979   // notices that SATB buffers are available for draining. It'd be
  3980   // very counterproductive if it did that. :-)
  3981   _draining_satb_buffers = true;
  3983   CMObjectClosure oc(this);
  3984   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  3985   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3986     satb_mq_set.set_par_closure(_worker_id, &oc);
  3987   } else {
  3988     satb_mq_set.set_closure(&oc);
  3991   // This keeps claiming and applying the closure to completed buffers
  3992   // until we run out of buffers or we need to abort.
  3993   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3994     while (!has_aborted() &&
  3995            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
  3996       if (_cm->verbose_medium()) {
  3997         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
  3999       statsOnly( ++_satb_buffers_processed );
  4000       regular_clock_call();
  4002   } else {
  4003     while (!has_aborted() &&
  4004            satb_mq_set.apply_closure_to_completed_buffer()) {
  4005       if (_cm->verbose_medium()) {
  4006         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
  4008       statsOnly( ++_satb_buffers_processed );
  4009       regular_clock_call();
  4013   _draining_satb_buffers = false;
  4015   assert(has_aborted() ||
  4016          concurrent() ||
  4017          satb_mq_set.completed_buffers_num() == 0, "invariant");
  4019   if (G1CollectedHeap::use_parallel_gc_threads()) {
  4020     satb_mq_set.set_par_closure(_worker_id, NULL);
  4021   } else {
  4022     satb_mq_set.set_closure(NULL);
  4025   // again, this was a potentially expensive operation, decrease the
  4026   // limits to get the regular clock call early
  4027   decrease_limits();
  4030 void CMTask::print_stats() {
  4031   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
  4032                          _worker_id, _calls);
  4033   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
  4034                          _elapsed_time_ms, _termination_time_ms);
  4035   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
  4036                          _step_times_ms.num(), _step_times_ms.avg(),
  4037                          _step_times_ms.sd());
  4038   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
  4039                          _step_times_ms.maximum(), _step_times_ms.sum());
  4041 #if _MARKING_STATS_
  4042   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
  4043                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
  4044                          _all_clock_intervals_ms.sd());
  4045   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
  4046                          _all_clock_intervals_ms.maximum(),
  4047                          _all_clock_intervals_ms.sum());
  4048   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
  4049                          _clock_due_to_scanning, _clock_due_to_marking);
  4050   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
  4051                          _objs_scanned, _objs_found_on_bitmap);
  4052   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
  4053                          _local_pushes, _local_pops, _local_max_size);
  4054   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
  4055                          _global_pushes, _global_pops, _global_max_size);
  4056   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
  4057                          _global_transfers_to, _global_transfers_from);
  4058   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
  4059   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
  4060   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
  4061                          _steal_attempts, _steals);
  4062   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
  4063   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
  4064                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
  4065   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
  4066                          _aborted_timed_out, _aborted_satb, _aborted_termination);
  4067 #endif // _MARKING_STATS_
  4070 /*****************************************************************************
  4072     The do_marking_step(time_target_ms, ...) method is the building
  4073     block of the parallel marking framework. It can be called in parallel
  4074     with other invocations of do_marking_step() on different tasks
  4075     (but only one per task, obviously) and concurrently with the
  4076     mutator threads, or during remark, hence it eliminates the need
  4077     for two versions of the code. When called during remark, it will
  4078     pick up from where the task left off during the concurrent marking
  4079     phase. Interestingly, tasks are also claimable during evacuation
  4080     pauses, since do_marking_step() ensures that it aborts before
  4081     it needs to yield.
  4083     The data structures that it uses to do marking work are the
  4084     following:
  4086       (1) Marking Bitmap. If there are gray objects that appear only
  4087       on the bitmap (this happens either when dealing with an overflow
  4088       or when the initial marking phase has simply marked the roots
  4089       and didn't push them on the stack), then tasks claim heap
  4090       regions whose bitmap they then scan to find gray objects. A
  4091       global finger indicates where the end of the last claimed region
  4092       is. A local finger indicates how far into the region a task has
  4093       scanned. The two fingers are used to determine how to gray an
  4094       object (i.e. whether simply marking it is OK, as it will be
  4095       visited by a task in the future, or whether it also needs to be
  4096       pushed on a stack).
  4098       (2) Local Queue. The local queue of the task which is accessed
  4099       reasonably efficiently by the task. Other tasks can steal from
  4100       it when they run out of work. Throughout the marking phase, a
  4101       task attempts to keep its local queue short but not totally
  4102       empty, so that entries are available for stealing by other
  4103       tasks. Only when there is no more work will a task totally
  4104       drain its local queue.
  4106       (3) Global Mark Stack. This handles local queue overflow. During
  4107       marking only sets of entries are moved between it and the local
  4108       queues, as access to it requires a mutex and more fine-grained
  4109       interaction with it, which might cause contention. If it
  4110       overflows, then the marking phase should restart and iterate
  4111       over the bitmap to identify gray objects. Throughout the marking
  4112       phase, tasks attempt to keep the global mark stack at a small
  4113       length but not totally empty, so that entries are available for
  4114       popping by other tasks. Only when there is no more work will
  4115       tasks totally drain the global mark stack.
  4117       (4) SATB Buffer Queue. This is where completed SATB buffers are
  4118       made available. Buffers are regularly removed from this queue
  4119       and scanned for roots, so that the queue doesn't get too
  4120       long. During remark, all completed buffers are processed, as
  4121     well as the filled-in parts of any uncompleted buffers.
  4123     The do_marking_step() method tries to abort when the time target
  4124     has been reached. There are a few other cases when the
  4125     do_marking_step() method also aborts:
  4127       (1) When the marking phase has been aborted (after a Full GC).
  4129       (2) When a global overflow (on the global stack) has been
  4130       triggered. Before the task aborts, it will actually sync up with
  4131       the other tasks to ensure that all the marking data structures
  4132       (local queues, stacks, fingers etc.)  are re-initialized so that
  4133       when do_marking_step() completes, the marking phase can
  4134       immediately restart.
  4136       (3) When enough completed SATB buffers are available. The
  4137       do_marking_step() method only tries to drain SATB buffers right
  4138       at the beginning. So, if enough buffers are available, the
  4139       marking step aborts and the SATB buffers are processed at
  4140       the beginning of the next invocation.
  4142       (4) To yield. When we have to yield, we abort and do the yield
  4143       right at the end of do_marking_step(). This saves us a lot
  4144       of hassle as, by yielding, we might allow a Full GC. If this
  4145       happens then objects will be compacted underneath our feet, the
  4146       heap might shrink, etc. We avoid having to check for all this
  4147       by simply aborting and doing the yield right at the end.
  4149     From the above it follows that the do_marking_step() method should
  4150     be called in a loop (or, otherwise, regularly) until it completes.
  4152     If a marking step completes without its has_aborted() flag being
  4153     true, it means it has completed the current marking phase (and
  4154     also all other marking tasks have done so and have all synced up).
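
           As a hedged illustration of such a driver loop (the names and
           the surrounding control flow here are hypothetical, not the
           actual callers):

             do {
               task->do_marking_step(target_ms,
                                     true  /* do_termination */,
                                     false /* is_serial */);
               // a concurrent caller would typically yield to a pending
               // safepoint at this point
             } while (task->has_aborted());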
  4156     A method called regular_clock_call() is invoked "regularly" (in
  4157     sub-ms intervals) throughout marking. It is this clock method that
  4158     checks all the abort conditions which were mentioned above and
  4159     decides when the task should abort. A work-based scheme is used to
  4160     trigger this clock method: when the number of object words the
  4161     marking phase has scanned or the number of references the marking
  4162     phase has visited reaches a given limit. Additional invocations of
  4163     the clock method have been planted in a few other strategic places
  4164     too. The initial reason for the clock method was to avoid calling
  4165     vtime too frequently, as it is quite expensive. So, once it was in
  4166     place, it was natural to piggy-back all the other conditions on it
  4167     too and not constantly check them throughout the code.
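
           Schematically, the work-based trigger amounts to something
           like the following (a sketch only; the fields match the ones
           used in do_marking_step() below, but the exact check sites
           differ):

             // planted in the object scanning and reference visiting
             // paths
             if (_words_scanned >= _words_scanned_limit ||
                 _refs_reached  >= _refs_reached_limit) {
               regular_clock_call();  // re-checks all abort conditions
             }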
  4169     If do_termination is true then do_marking_step will enter its
  4170     termination protocol.
  4172     The value of is_serial must be true when do_marking_step is being
  4173     called serially (i.e. by the VMThread) and do_marking_step should
  4174     skip any synchronization in the termination and overflow code.
  4175     Examples include the serial remark code and the serial reference
  4176     processing closures.
  4178     The value of is_serial must be false when do_marking_step is
  4179     being called by any of the worker threads in a work gang.
  4180     Examples include the concurrent marking code (CMMarkingTask),
  4181     the MT remark code, and the MT reference processing closures.
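
           For instance (illustrative parameter values only):

             // serial remark on the VMThread: no stealing, no barriers
             task->do_marking_step(target_ms, true /* do_termination */,
                                   true  /* is_serial */);

             // worker in a work gang: termination protocol + stealing
             task->do_marking_step(target_ms, true /* do_termination */,
                                   false /* is_serial */);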
  4183  *****************************************************************************/
  4185 void CMTask::do_marking_step(double time_target_ms,
  4186                              bool do_termination,
  4187                              bool is_serial) {
  4188   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  4189   assert(concurrent() == _cm->concurrent(), "they should be the same");
  4191   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  4192   assert(_task_queues != NULL, "invariant");
  4193   assert(_task_queue != NULL, "invariant");
  4194   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
  4196   assert(!_claimed,
  4197          "only one thread should claim this task at any one time");
  4199   // OK, this doesn't safeguard against all possible scenarios, as it is
  4200   // possible for two threads to set the _claimed flag at the same
  4201   // time. But it is only for debugging purposes anyway and it will
  4202   // catch most problems.
  4203   _claimed = true;
  4205   _start_time_ms = os::elapsedVTime() * 1000.0;
  4206   statsOnly( _interval_start_time_ms = _start_time_ms );
  4208   // If do_stealing is true then do_marking_step will attempt to
  4209   // steal work from the other CMTasks. It only makes sense to
  4210   // enable stealing when the termination protocol is enabled
  4211   // and do_marking_step() is not being called serially.
  4212   bool do_stealing = do_termination && !is_serial;
  4214   double diff_prediction_ms =
  4215     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  4216   _time_target_ms = time_target_ms - diff_prediction_ms;
  4218   // set up the variables that are used in the work-based scheme to
  4219   // call the regular clock method
  4220   _words_scanned = 0;
  4221   _refs_reached  = 0;
  4222   recalculate_limits();
  4224   // clear all flags
  4225   clear_has_aborted();
  4226   _has_timed_out = false;
  4227   _draining_satb_buffers = false;
  4229   ++_calls;
  4231   if (_cm->verbose_low()) {
  4232     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
  4233                            "target = %1.2lfms >>>>>>>>>>",
  4234                            _worker_id, _calls, _time_target_ms);
  4235   }
  4237   // Set up the bitmap and oop closures. Anything that uses them is
  4238   // eventually called from this method, so it is OK to allocate these
  4239   // statically.
  4240   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  4241   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
  4242   set_cm_oop_closure(&cm_oop_closure);
  4244   if (_cm->has_overflown()) {
  4245     // This can happen if the mark stack overflows during a GC pause
  4246     // and this task, after a yield point, restarts. We have to abort
  4247     // as we need to get into the overflow protocol which happens
  4248     // right at the end of this task.
  4249     set_has_aborted();
  4250   }
  4252   // First drain any available SATB buffers. After this, we will not
  4253   // look at SATB buffers before the next invocation of this method.
  4254   // If enough completed SATB buffers are queued up, the regular clock
  4255   // will abort this task so that it restarts.
  4256   drain_satb_buffers();
  4257   // ...then partially drain the local queue and the global stack
  4258   drain_local_queue(true);
  4259   drain_global_stack(true);
  4261   do {
  4262     if (!has_aborted() && _curr_region != NULL) {
  4263       // This means that we're already holding on to a region.
  4264       assert(_finger != NULL, "if region is not NULL, then the finger "
  4265              "should not be NULL either");
  4267       // We might have restarted this task after an evacuation pause
  4268       // which might have evacuated the region we're holding on to
  4269       // underneath our feet. Let's read its limit again to make sure
  4270       // that we do not iterate over a region of the heap that
  4271       // contains garbage (update_region_limit() will also move
  4272       // _finger to the start of the region if it is found empty).
  4273       update_region_limit();
  4274       // We will start from _finger not from the start of the region,
  4275       // as we might be restarting this task after aborting half-way
  4276       // through scanning this region. In this case, _finger points to
  4277       // the address where we last found a marked object. If this is a
  4278       // fresh region, _finger points to start().
  4279       MemRegion mr = MemRegion(_finger, _region_limit);
  4281       if (_cm->verbose_low()) {
  4282         gclog_or_tty->print_cr("[%u] we're scanning part "
  4283                                "["PTR_FORMAT", "PTR_FORMAT") "
  4284                                "of region "HR_FORMAT,
  4285                                _worker_id, p2i(_finger), p2i(_region_limit),
  4286                                HR_FORMAT_PARAMS(_curr_region));
  4287       }
  4289       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
  4290              "humongous regions should go around loop once only");
  4292       // Some special cases:
  4293       // If the memory region is empty, we can just give up the region.
  4294       // If the current region is humongous then we only need to check
  4295       // the bitmap for the bit associated with the start of the object,
  4296       // scan the object if it's live, and give up the region.
  4297       // Otherwise, let's iterate over the bitmap of the part of the region
  4298       // that is left.
  4299       // If the iteration is successful, give up the region.
  4300       if (mr.is_empty()) {
  4301         giveup_current_region();
  4302         regular_clock_call();
  4303       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
  4304         if (_nextMarkBitMap->isMarked(mr.start())) {
  4305           // The object is marked - apply the closure
  4306           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
  4307           bitmap_closure.do_bit(offset);
  4308         }
  4309         // Even if this task aborted while scanning the humongous object
  4310         // we can (and should) give up the current region.
  4311         giveup_current_region();
  4312         regular_clock_call();
  4313       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
  4314         giveup_current_region();
  4315         regular_clock_call();
  4316       } else {
  4317         assert(has_aborted(), "currently the only way to do so");
  4318         // The only way to abort the bitmap iteration is to return
  4319         // false from the do_bit() method. However, inside the
  4320         // do_bit() method we move the _finger to point to the
  4321         // object currently being looked at. So, if we bail out, we
  4322         // have definitely set _finger to something non-null.
  4323         assert(_finger != NULL, "invariant");
  4325         // Region iteration was actually aborted. So now _finger
  4326         // points to the address of the object we last scanned. If we
  4327         // leave it there, when we restart this task, we will rescan
  4328         // the object. It is easy to avoid this. We move the finger by
  4329         // enough to point to the next possible object header (the
  4330         // bitmap knows by how much we need to move it as it knows its
  4331         // granularity).
  4332         assert(_finger < _region_limit, "invariant");
  4333         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
  4334         // Check if bitmap iteration was aborted while scanning the last object
  4335         if (new_finger >= _region_limit) {
  4336           giveup_current_region();
  4337         } else {
  4338           move_finger_to(new_finger);
  4339         }
  4340       }
  4341     }
  4342     // At this point we have either completed iterating over the
  4343     // region we were holding on to, or we have aborted.
  4345     // We then partially drain the local queue and the global stack.
  4346     // (Do we really need this?)
  4347     drain_local_queue(true);
  4348     drain_global_stack(true);
  4350     // Read the note on the claim_region() method on why it might
  4351     // return NULL with potentially more regions available for
  4352     // claiming and why we have to check out_of_regions() to determine
  4353     // whether we're done or not.
  4354     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
  4355       // We are going to try to claim a new region. We should have
  4356       // given up on the previous one.
  4357       // Separated the asserts so that we know which one fires.
  4358       assert(_curr_region  == NULL, "invariant");
  4359       assert(_finger       == NULL, "invariant");
  4360       assert(_region_limit == NULL, "invariant");
  4361       if (_cm->verbose_low()) {
  4362         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
  4363       }
  4364       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
  4365       if (claimed_region != NULL) {
  4366         // Yes, we managed to claim one
  4367         statsOnly( ++_regions_claimed );
  4369         if (_cm->verbose_low()) {
  4370           gclog_or_tty->print_cr("[%u] we successfully claimed "
  4371                                  "region "PTR_FORMAT,
  4372                                  _worker_id, p2i(claimed_region));
  4373         }
  4375         setup_for_region(claimed_region);
  4376         assert(_curr_region == claimed_region, "invariant");
  4377       }
  4378       // It is important to call the regular clock here. It might take
  4379       // a while to claim a region if, for example, we hit a large
  4380       // block of empty regions. So we need to call the regular clock
  4381       // method once round the loop to make sure it's called
  4382       // frequently enough.
  4383       regular_clock_call();
  4384     }
  4386     if (!has_aborted() && _curr_region == NULL) {
  4387       assert(_cm->out_of_regions(),
  4388            "at this point we should be out of regions");
  4389     }
  4390   } while ( _curr_region != NULL && !has_aborted());
  4392   if (!has_aborted()) {
  4393     // We cannot check whether the global stack is empty, since other
  4394     // tasks might be pushing objects to it concurrently.
  4395     assert(_cm->out_of_regions(),
  4396            "at this point we should be out of regions");
  4398     if (_cm->verbose_low()) {
  4399       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
  4400     }
  4402     // Try to reduce the number of available SATB buffers so that
  4403     // remark has less work to do.
  4404     drain_satb_buffers();
  4405   }
  4407   // Since we've done everything else, we can now totally drain the
  4408   // local queue and global stack.
  4409   drain_local_queue(false);
  4410   drain_global_stack(false);
  4412   // Attempt at work stealing from other tasks' queues.
  4413   if (do_stealing && !has_aborted()) {
  4414     // We have not aborted. This means that we have finished all that
  4415     // we could. Let's try to do some stealing...
  4417     // We cannot check whether the global stack is empty, since other
  4418     // tasks might be pushing objects to it concurrently.
  4419     assert(_cm->out_of_regions() && _task_queue->size() == 0,
  4420            "only way to reach here");
  4422     if (_cm->verbose_low()) {
  4423       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
  4424     }
  4426     while (!has_aborted()) {
  4427       oop obj;
  4428       statsOnly( ++_steal_attempts );
  4430       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
  4431         if (_cm->verbose_medium()) {
  4432           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
  4433                                  _worker_id, p2i((void*) obj));
  4434         }
  4436         statsOnly( ++_steals );
  4438         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
  4439                "any stolen object should be marked");
  4440         scan_object(obj);
  4442         // And since we're towards the end, let's totally drain the
  4443         // local queue and global stack.
  4444         drain_local_queue(false);
  4445         drain_global_stack(false);
  4446       } else {
  4447         break;
  4448       }
  4449     }
  4450   }
  4452   // If we are about to wrap up and go into termination, check if we
  4453   // should raise the overflow flag.
  4454   if (do_termination && !has_aborted()) {
  4455     if (_cm->force_overflow()->should_force()) {
  4456       _cm->set_has_overflown();
  4457       regular_clock_call();
  4458     }
  4459   }
  4461   // We still haven't aborted. Now, let's try to get into the
  4462   // termination protocol.
  4463   if (do_termination && !has_aborted()) {
  4464     // We cannot check whether the global stack is empty, since other
  4465     // tasks might be concurrently pushing objects on it.
  4466     // Separated the asserts so that we know which one fires.
  4467     assert(_cm->out_of_regions(), "only way to reach here");
  4468     assert(_task_queue->size() == 0, "only way to reach here");
  4470     if (_cm->verbose_low()) {
  4471       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
  4472     }
  4474     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
  4476     // The CMTask class also extends the TerminatorTerminator class,
  4477     // hence its should_exit_termination() method will also decide
  4478     // whether to exit the termination protocol or not.
  4479     bool finished = (is_serial ||
  4480                      _cm->terminator()->offer_termination(this));
  4481     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
  4482     _termination_time_ms +=
  4483       termination_end_time_ms - _termination_start_time_ms;
  4485     if (finished) {
  4486       // We're all done.
  4488       if (_worker_id == 0) {
  4489         // let's allow task 0 to do this
  4490         if (concurrent()) {
  4491           assert(_cm->concurrent_marking_in_progress(), "invariant");
  4492           // we need to set this to false before the next
  4493           // safepoint. This way we ensure that the marking phase
  4494           // doesn't observe any more heap expansions.
  4495           _cm->clear_concurrent_marking_in_progress();
  4496         }
  4497       }
  4499       // We can now guarantee that the global stack is empty, since
  4500       // all other tasks have finished. We separated the guarantees so
  4501       // that, if a condition is false, we can immediately find out
  4502       // which one.
  4503       guarantee(_cm->out_of_regions(), "only way to reach here");
  4504       guarantee(_cm->mark_stack_empty(), "only way to reach here");
  4505       guarantee(_task_queue->size() == 0, "only way to reach here");
  4506       guarantee(!_cm->has_overflown(), "only way to reach here");
  4507       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
  4509       if (_cm->verbose_low()) {
  4510         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
  4511       }
  4512     } else {
  4513       // Apparently there's more work to do. Let's abort this task. It
  4514       // will restart it and we can hopefully find more things to do.
  4516       if (_cm->verbose_low()) {
  4517         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
  4518                                _worker_id);
  4519       }
  4521       set_has_aborted();
  4522       statsOnly( ++_aborted_termination );
  4523     }
  4524   }
  4526   // Mainly for debugging purposes to make sure that a pointer to the
  4527   // closure which was statically allocated in this frame doesn't
  4528   // escape it by accident.
  4529   set_cm_oop_closure(NULL);
  4530   double end_time_ms = os::elapsedVTime() * 1000.0;
  4531   double elapsed_time_ms = end_time_ms - _start_time_ms;
  4532   // Update the step history.
  4533   _step_times_ms.add(elapsed_time_ms);
  4535   if (has_aborted()) {
  4536     // The task was aborted for some reason.
  4538     statsOnly( ++_aborted );
  4540     if (_has_timed_out) {
  4541       double diff_ms = elapsed_time_ms - _time_target_ms;
  4542       // Keep statistics of how well we did with respect to hitting
  4543       // our target only if we actually timed out (if we aborted for
  4544       // other reasons, then the results might get skewed).
  4545       _marking_step_diffs_ms.add(diff_ms);
  4546     }
  4548     if (_cm->has_overflown()) {
  4549       // This is the interesting one. We aborted because a global
  4550       // overflow was raised. This means we have to restart the
  4551       // marking phase and start iterating over regions. However, in
  4552       // order to do this we have to make sure that all tasks stop
  4553       // what they are doing and re-initialise in a safe manner. We
  4554       // will achieve this with the use of two barrier sync points.
  4556       if (_cm->verbose_low()) {
  4557         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
  4558       }
  4560       if (!is_serial) {
  4561         // We only need to enter the sync barrier if being called
  4562         // from a parallel context
  4563         _cm->enter_first_sync_barrier(_worker_id);
  4565         // When we exit this sync barrier we know that all tasks have
  4566         // stopped doing marking work. So, it's now safe to
  4567         // re-initialise our data structures. At the end of this method,
  4568         // task 0 will clear the global data structures.
  4569       }
  4571       statsOnly( ++_aborted_overflow );
  4573       // We clear the local state of this task...
  4574       clear_region_fields();
  4576       if (!is_serial) {
  4577         // ...and enter the second barrier.
  4578         _cm->enter_second_sync_barrier(_worker_id);
  4579       }
  4580       // At this point, if we're in the concurrent phase of
  4581       // marking, everything has been re-initialized and we're
  4582       // ready to restart.
  4583     }
  4585     if (_cm->verbose_low()) {
  4586       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
  4587                              "elapsed = %1.2lfms <<<<<<<<<<",
  4588                              _worker_id, _time_target_ms, elapsed_time_ms);
  4589       if (_cm->has_aborted()) {
  4590         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
  4591                                _worker_id);
  4592       }
  4593     }
  4594   } else {
  4595     if (_cm->verbose_low()) {
  4596       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
  4597                              "elapsed = %1.2lfms <<<<<<<<<<",
  4598                              _worker_id, _time_target_ms, elapsed_time_ms);
  4599     }
  4600   }
  4602   _claimed = false;
  4603 }
  4605 CMTask::CMTask(uint worker_id,
  4606                ConcurrentMark* cm,
  4607                size_t* marked_bytes,
  4608                BitMap* card_bm,
  4609                CMTaskQueue* task_queue,
  4610                CMTaskQueueSet* task_queues)
  4611   : _g1h(G1CollectedHeap::heap()),
  4612     _worker_id(worker_id), _cm(cm),
  4613     _claimed(false),
  4614     _nextMarkBitMap(NULL), _hash_seed(17),
  4615     _task_queue(task_queue),
  4616     _task_queues(task_queues),
  4617     _cm_oop_closure(NULL),
  4618     _marked_bytes_array(marked_bytes),
  4619     _card_bm(card_bm) {
  4620   guarantee(task_queue != NULL, "invariant");
  4621   guarantee(task_queues != NULL, "invariant");
  4623   statsOnly( _clock_due_to_scanning = 0;
  4624              _clock_due_to_marking  = 0 );
  4626   _marking_step_diffs_ms.add(0.5);
  4627 }
  4629 // These are formatting macros that are used below to ensure
  4630 // consistent formatting. The *_H_* versions are used to format the
  4631 // header for a particular value and they should be kept consistent
  4632 // with the corresponding macro. Also note that most of the macros add
  4633 // the necessary white space (as a prefix) which makes them a bit
  4634 // easier to compose.
  4636 // All the output lines are prefixed with this string to be able to
  4637 // identify them easily in a large log file.
  4638 #define G1PPRL_LINE_PREFIX            "###"
  4640 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
  4641 #ifdef _LP64
  4642 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
  4643 #else // _LP64
  4644 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
  4645 #endif // _LP64
  4647 // For per-region info
  4648 #define G1PPRL_TYPE_FORMAT            "   %-4s"
  4649 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
  4650 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
  4651 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
  4652 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
  4653 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
  4655 // For summary info
  4656 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
  4657 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
  4658 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
  4659 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
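
       // As a rough illustration of how these macros compose (an example
       // only; the real calls appear below, and the values here are made
       // up):
       //
       //   _out->print_cr(G1PPRL_LINE_PREFIX
       //                  G1PPRL_TYPE_FORMAT       // "   %-4s"
       //                  G1PPRL_BYTE_FORMAT,      // "  "SIZE_FORMAT_W(9)
       //                  type, used_bytes);
       //
       // with the matching *_H_* macros producing the aligned headers.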
  4661 G1PrintRegionLivenessInfoClosure::
  4662 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  4663   : _out(out),
  4664     _total_used_bytes(0), _total_capacity_bytes(0),
  4665     _total_prev_live_bytes(0), _total_next_live_bytes(0),
  4666     _hum_used_bytes(0), _hum_capacity_bytes(0),
  4667     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
  4668     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  4669   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  4670   MemRegion g1_reserved = g1h->g1_reserved();
  4671   double now = os::elapsedTime();
  4673   // Print the header of the output.
  4674   _out->cr();
  4675   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  4676   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
  4677                  G1PPRL_SUM_ADDR_FORMAT("reserved")
  4678                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
  4679                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
  4680                  HeapRegion::GrainBytes);
  4681   _out->print_cr(G1PPRL_LINE_PREFIX);
  4682   _out->print_cr(G1PPRL_LINE_PREFIX
  4683                 G1PPRL_TYPE_H_FORMAT
  4684                 G1PPRL_ADDR_BASE_H_FORMAT
  4685                 G1PPRL_BYTE_H_FORMAT
  4686                 G1PPRL_BYTE_H_FORMAT
  4687                 G1PPRL_BYTE_H_FORMAT
  4688                 G1PPRL_DOUBLE_H_FORMAT
  4689                 G1PPRL_BYTE_H_FORMAT
  4690                 G1PPRL_BYTE_H_FORMAT,
  4691                 "type", "address-range",
  4692                 "used", "prev-live", "next-live", "gc-eff",
  4693                 "remset", "code-roots");
  4694   _out->print_cr(G1PPRL_LINE_PREFIX
  4695                 G1PPRL_TYPE_H_FORMAT
  4696                 G1PPRL_ADDR_BASE_H_FORMAT
  4697                 G1PPRL_BYTE_H_FORMAT
  4698                 G1PPRL_BYTE_H_FORMAT
  4699                 G1PPRL_BYTE_H_FORMAT
  4700                 G1PPRL_DOUBLE_H_FORMAT
  4701                 G1PPRL_BYTE_H_FORMAT
  4702                 G1PPRL_BYTE_H_FORMAT,
  4703                 "", "",
  4704                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
  4705                 "(bytes)", "(bytes)");
  4706 }
  4708 // It takes as a parameter a reference to one of the _hum_* fields,
  4709 // deduces the corresponding value for a region in a humongous region
  4710 // series (either the region size, or what's left if the _hum_* field
  4711 // is < the region size), and updates the _hum_* field accordingly.
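       // For example (using an assumed 1M region size purely for
       // illustration): starting from *hum_bytes == 2621440 (2.5M),
       // three successive calls return 1048576, 1048576 and 524288,
       // leaving *hum_bytes == 0.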
  4712 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  4713   size_t bytes = 0;
  4714   // The > 0 check is to deal with the prev and next live bytes which
  4715   // could be 0.
  4716   if (*hum_bytes > 0) {
  4717     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
  4718     *hum_bytes -= bytes;
  4719   }
  4720   return bytes;
  4721 }
  4723 // It deduces the values for a region in a humongous region series
  4724 // from the _hum_* fields and updates those accordingly. It assumes
  4725 // that the _hum_* fields have already been set up from the "starts
  4726 // humongous" region and that we visit the regions in address order.
  4727 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
  4728                                                      size_t* capacity_bytes,
  4729                                                      size_t* prev_live_bytes,
  4730                                                      size_t* next_live_bytes) {
  4731   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  4732   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  4733   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  4734   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  4735   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
  4736 }
  4738 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  4739   const char* type = "";
  4740   HeapWord* bottom       = r->bottom();
  4741   HeapWord* end          = r->end();
  4742   size_t capacity_bytes  = r->capacity();
  4743   size_t used_bytes      = r->used();
  4744   size_t prev_live_bytes = r->live_bytes();
  4745   size_t next_live_bytes = r->next_live_bytes();
  4746   double gc_eff          = r->gc_efficiency();
  4747   size_t remset_bytes    = r->rem_set()->mem_size();
  4748   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
  4750   if (r->used() == 0) {
  4751     type = "FREE";
  4752   } else if (r->is_survivor()) {
  4753     type = "SURV";
  4754   } else if (r->is_young()) {
  4755     type = "EDEN";
  4756   } else if (r->startsHumongous()) {
  4757     type = "HUMS";
  4759     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
  4760            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
  4761            "they should have been zeroed after the last time we used them");
  4762     // Set up the _hum_* fields.
  4763     _hum_capacity_bytes  = capacity_bytes;
  4764     _hum_used_bytes      = used_bytes;
  4765     _hum_prev_live_bytes = prev_live_bytes;
  4766     _hum_next_live_bytes = next_live_bytes;
  4767     get_hum_bytes(&used_bytes, &capacity_bytes,
  4768                   &prev_live_bytes, &next_live_bytes);
  4769     end = bottom + HeapRegion::GrainWords;
  4770   } else if (r->continuesHumongous()) {
  4771     type = "HUMC";
  4772     get_hum_bytes(&used_bytes, &capacity_bytes,
  4773                   &prev_live_bytes, &next_live_bytes);
  4774     assert(end == bottom + HeapRegion::GrainWords, "invariant");
  4775   } else {
  4776     type = "OLD";
  4777   }
  4779   _total_used_bytes      += used_bytes;
  4780   _total_capacity_bytes  += capacity_bytes;
  4781   _total_prev_live_bytes += prev_live_bytes;
  4782   _total_next_live_bytes += next_live_bytes;
  4783   _total_remset_bytes    += remset_bytes;
  4784   _total_strong_code_roots_bytes += strong_code_roots_bytes;
  4786   // Print a line for this particular region.
  4787   _out->print_cr(G1PPRL_LINE_PREFIX
  4788                  G1PPRL_TYPE_FORMAT
  4789                  G1PPRL_ADDR_BASE_FORMAT
  4790                  G1PPRL_BYTE_FORMAT
  4791                  G1PPRL_BYTE_FORMAT
  4792                  G1PPRL_BYTE_FORMAT
  4793                  G1PPRL_DOUBLE_FORMAT
  4794                  G1PPRL_BYTE_FORMAT
  4795                  G1PPRL_BYTE_FORMAT,
  4796                  type, p2i(bottom), p2i(end),
  4797                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
  4798                  remset_bytes, strong_code_roots_bytes);
  4800   return false;
  4801 }
  4803 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  4804   // add static memory usages to remembered set sizes
  4805   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  4806   // Print the footer of the output.
  4807   _out->print_cr(G1PPRL_LINE_PREFIX);
  4808   _out->print_cr(G1PPRL_LINE_PREFIX
  4809                  " SUMMARY"
  4810                  G1PPRL_SUM_MB_FORMAT("capacity")
  4811                  G1PPRL_SUM_MB_PERC_FORMAT("used")
  4812                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
  4813                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
  4814                  G1PPRL_SUM_MB_FORMAT("remset")
  4815                  G1PPRL_SUM_MB_FORMAT("code-roots"),
  4816                  bytes_to_mb(_total_capacity_bytes),
  4817                  bytes_to_mb(_total_used_bytes),
  4818                  perc(_total_used_bytes, _total_capacity_bytes),
  4819                  bytes_to_mb(_total_prev_live_bytes),
  4820                  perc(_total_prev_live_bytes, _total_capacity_bytes),
  4821                  bytes_to_mb(_total_next_live_bytes),
  4822                  perc(_total_next_live_bytes, _total_capacity_bytes),
  4823                  bytes_to_mb(_total_remset_bytes),
  4824                  bytes_to_mb(_total_strong_code_roots_bytes));
  4825   _out->cr();
  4826 }
