src/share/vm/gc_implementation/g1/concurrentMark.cpp

author:      tschatzl
date:        Mon, 20 Jan 2014 11:47:07 +0100
changeset:   6229 (5a32d2a3cc1e)
parent:      6168 (1de8e5356754)
child:       6230 (cb7ec2423207)
permissions: -rw-r--r--

8027476: Improve performance of Stringtable unlink
8027455: Improve symbol table scan times during gc pauses
Summary: Parallelize string table and symbol table scan during remark and full GC. Some additional statistics output if the experimental flag G1TraceStringSymbolTableScrubbing is set.
Reviewed-by: mgerdin, coleenp, brutisso
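
The summary's approach, in outline: instead of one thread walking every bucket, each GC worker claims fixed-size runs of buckets from a shared atomic cursor until the table is exhausted. A minimal standalone sketch of that claiming scheme (all names here are illustrative; this is not the HotSpot API introduced by the change):

  #include <atomic>
  #include <cstddef>

  // Sketch: parallel scrub of a bucketed table. Each GC worker repeatedly
  // claims a run of buckets via an atomic cursor and scrubs only those.
  struct ParallelTableScan {
    std::atomic<size_t> _next_bucket{0};
    static const size_t ClaimChunk = 32;   // buckets claimed per fetch_add

    template <typename ScrubBucketFn>
    void worker_scan(size_t table_size, ScrubBucketFn scrub_bucket) {
      for (;;) {
        size_t start = _next_bucket.fetch_add(ClaimChunk);
        if (start >= table_size) return;   // whole table already claimed
        size_t end = start + ClaimChunk;
        if (end > table_size) end = table_size;
        for (size_t b = start; b < end; ++b) {
          scrub_bucket(b);                 // e.g. unlink entries whose oop is dead
        }
      }
    }
  };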

     1 /*
     2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/symbolTable.hpp"
    27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
    28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
    32 #include "gc_implementation/g1/g1Log.hpp"
    33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    34 #include "gc_implementation/g1/g1RemSet.hpp"
    35 #include "gc_implementation/g1/heapRegion.inline.hpp"
    36 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    37 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    38 #include "gc_implementation/shared/vmGCOperations.hpp"
    39 #include "gc_implementation/shared/gcTimer.hpp"
    40 #include "gc_implementation/shared/gcTrace.hpp"
    41 #include "gc_implementation/shared/gcTraceTime.hpp"
    42 #include "memory/genOopClosures.inline.hpp"
    43 #include "memory/referencePolicy.hpp"
    44 #include "memory/resourceArea.hpp"
    45 #include "oops/oop.inline.hpp"
    46 #include "runtime/handles.inline.hpp"
    47 #include "runtime/java.hpp"
    48 #include "services/memTracker.hpp"
    50 // Concurrent marking bit map wrapper
    52 CMBitMapRO::CMBitMapRO(int shifter) :
    53   _bm(),
    54   _shifter(shifter) {
    55   _bmStartWord = 0;
    56   _bmWordSize = 0;
    57 }
    59 HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
    60                                                HeapWord* limit) const {
    61   // First we must round addr *up* to a possible object boundary.
    62   addr = (HeapWord*)align_size_up((intptr_t)addr,
    63                                   HeapWordSize << _shifter);
    64   size_t addrOffset = heapWordToOffset(addr);
    65   if (limit == NULL) {
    66     limit = _bmStartWord + _bmWordSize;
    67   }
    68   size_t limitOffset = heapWordToOffset(limit);
    69   size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset);
    70   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
    71   assert(nextAddr >= addr, "get_next_one postcondition");
    72   assert(nextAddr == limit || isMarked(nextAddr),
    73          "get_next_one postcondition");
    74   return nextAddr;
    75 }
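
For reference, the heapWordToOffset()/offsetToHeapWord() conversions used above are plain shift arithmetic against the bitmap's base address. A standalone sketch of the mapping, with an integer type standing in for HeapWord* (BitMapGeometry and Addr are invented names):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  typedef uintptr_t Addr;  // stand-in for HeapWord*, measured in heap words

  struct BitMapGeometry {
    Addr base;     // first heap word covered by the bitmap
    int  shifter;  // log2(heap words per bit); log2(MinObjAlignment) for CM

    size_t heapWordToOffset(Addr addr) const { return (addr - base) >> shifter; }
    Addr   offsetToHeapWord(size_t off) const { return base + (off << shifter); }
  };

  int main() {
    BitMapGeometry g = { 0x1000, 3 };      // one bit per 8 heap words
    size_t off = g.heapWordToOffset(0x1000 + 16);
    assert(off == 2);
    assert(g.offsetToHeapWord(off) == 0x1000 + 16);
    return 0;
  }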
    77 HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
    78                                                  HeapWord* limit) const {
    79   size_t addrOffset = heapWordToOffset(addr);
    80   if (limit == NULL) {
    81     limit = _bmStartWord + _bmWordSize;
    82   }
    83   size_t limitOffset = heapWordToOffset(limit);
    84   size_t nextOffset = _bm.get_next_zero_offset(addrOffset, limitOffset);
    85   HeapWord* nextAddr = offsetToHeapWord(nextOffset);
    86   assert(nextAddr >= addr, "get_next_one postcondition");
    87   assert(nextAddr == limit || !isMarked(nextAddr),
    88          "get_next_one postcondition");
    89   return nextAddr;
    90 }
    92 int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
    93   assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
    94   return (int) (diff >> _shifter);
    95 }
    97 #ifndef PRODUCT
    98 bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
    99   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   100   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
   101          "size inconsistency");
   102   return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
   103          _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
   104 }
   105 #endif
   107 void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
   108   _bm.print_on_error(st, prefix);
   109 }
   111 bool CMBitMap::allocate(ReservedSpace heap_rs) {
   112   _bmStartWord = (HeapWord*)(heap_rs.base());
   113   _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
   114   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
   115                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
   116   if (!brs.is_reserved()) {
   117     warning("ConcurrentMark marking bit map allocation failure");
   118     return false;
   119   }
   120   MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
   121   // For now we'll just commit all of the bit map up front.
   122   // Later on we'll try to be more parsimonious with swap.
   123   if (!_virtual_space.initialize(brs, brs.size())) {
   124     warning("ConcurrentMark marking bit map backing store failure");
   125     return false;
   126   }
   127   assert(_virtual_space.committed_size() == brs.size(),
   128          "didn't reserve backing store for all of concurrent marking bit map?");
   129   _bm.set_map((uintptr_t*)_virtual_space.low());
   130   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
   131          _bmWordSize, "inconsistency in bit map sizing");
   132   _bm.set_size(_bmWordSize >> _shifter);
   133   return true;
   134 }
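
The reservation above works out to one bitmap bit per MinObjAlignment heap words, rounded up to a whole byte. A quick check with illustrative numbers (1 GB heap, 8-byte heap words, shifter 0, chosen for the example):

  #include <cassert>
  #include <cstddef>

  int main() {
    // Illustrative numbers, not queried from a VM: 1 GB heap, 8-byte heap
    // words (LogHeapWordSize == 3), shifter 0 (one bitmap bit per heap word).
    size_t heap_bytes        = size_t(1) << 30;
    size_t bm_word_size      = heap_bytes >> 3;   // heap size in heap words
    int    shifter           = 0;
    int    log_bits_per_byte = 3;

    size_t bitmap_bytes = (bm_word_size >> (shifter + log_bits_per_byte)) + 1;
    assert(bitmap_bytes == (size_t(1) << 24) + 1);  // ~16 MB, i.e. heap/64
    return 0;
  }

Since ConcurrentMark keeps both a prev and a next bitmap, the combined footprint is roughly 1/32 of the reserved heap.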
   136 void CMBitMap::clearAll() {
   137   _bm.clear();
   138   return;
   139 }
   141 void CMBitMap::markRange(MemRegion mr) {
   142   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
   143   assert(!mr.is_empty(), "unexpected empty region");
   144   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
   145           ((HeapWord *) mr.end())),
   146          "markRange memory region end is not card aligned");
   147   // convert address range into offset range
   148   _bm.at_put_range(heapWordToOffset(mr.start()),
   149                    heapWordToOffset(mr.end()), true);
   150 }
   152 void CMBitMap::clearRange(MemRegion mr) {
   153   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
   154   assert(!mr.is_empty(), "unexpected empty region");
   155   // convert address range into offset range
   156   _bm.at_put_range(heapWordToOffset(mr.start()),
   157                    heapWordToOffset(mr.end()), false);
   158 }
   160 MemRegion CMBitMap::getAndClearMarkedRegion(HeapWord* addr,
   161                                             HeapWord* end_addr) {
   162   HeapWord* start = getNextMarkedWordAddress(addr);
   163   start = MIN2(start, end_addr);
   164   HeapWord* end   = getNextUnmarkedWordAddress(start);
   165   end = MIN2(end, end_addr);
   166   assert(start <= end, "Consistency check");
   167   MemRegion mr(start, end);
   168   if (!mr.is_empty()) {
   169     clearRange(mr);
   170   }
   171   return mr;
   172 }
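
getAndClearMarkedRegion() peels one contiguous marked run off the bitmap: find the next set bit, scan to the next clear bit, and clear the span in between. The same shape over std::vector<bool>, as a sketch (get_and_clear_run is an invented helper, not the CM code):

  #include <cstddef>
  #include <utility>
  #include <vector>

  // Return the first run [start, end) of set bits at or after 'from',
  // clearing it in the process; start == end means no run was found.
  std::pair<size_t, size_t> get_and_clear_run(std::vector<bool>& bm, size_t from) {
    size_t n = bm.size();
    size_t start = from;
    while (start < n && !bm[start]) ++start;  // next marked bit
    size_t end = start;
    while (end < n && bm[end]) {              // up to next unmarked bit
      bm[end] = false;                        // clear as we go
      ++end;
    }
    return std::make_pair(start, end);
  }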
   174 CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
   175   _base(NULL), _cm(cm)
   176 #ifdef ASSERT
   177   , _drain_in_progress(false)
   178   , _drain_in_progress_yields(false)
   179 #endif
   180 {}
   182 bool CMMarkStack::allocate(size_t capacity) {
   183   // allocate a stack of the requisite depth
   184   ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
   185   if (!rs.is_reserved()) {
   186     warning("ConcurrentMark MarkStack allocation failure");
   187     return false;
   188   }
   189   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
   190   if (!_virtual_space.initialize(rs, rs.size())) {
   191     warning("ConcurrentMark MarkStack backing store failure");
   192     // Release the virtual memory reserved for the marking stack
   193     rs.release();
   194     return false;
   195   }
   196   assert(_virtual_space.committed_size() == rs.size(),
   197          "Didn't reserve backing store for all of ConcurrentMark stack?");
   198   _base = (oop*) _virtual_space.low();
   199   setEmpty();
   200   _capacity = (jint) capacity;
   201   _saved_index = -1;
   202   _should_expand = false;
   203   NOT_PRODUCT(_max_depth = 0);
   204   return true;
   205 }
   207 void CMMarkStack::expand() {
    208   // Called during remark if we've overflowed the marking stack during marking.
    209   assert(isEmpty(), "stack should have been emptied while handling overflow");
   210   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
   211   // Clear expansion flag
   212   _should_expand = false;
   213   if (_capacity == (jint) MarkStackSizeMax) {
   214     if (PrintGCDetails && Verbose) {
   215       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
   216     }
   217     return;
   218   }
   219   // Double capacity if possible
   220   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
   221   // Do not give up existing stack until we have managed to
   222   // get the double capacity that we desired.
   223   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
   224                                                            sizeof(oop)));
   225   if (rs.is_reserved()) {
   226     // Release the backing store associated with old stack
   227     _virtual_space.release();
   228     // Reinitialize virtual space for new stack
   229     if (!_virtual_space.initialize(rs, rs.size())) {
   230       fatal("Not enough swap for expanded marking stack capacity");
   231     }
   232     _base = (oop*)(_virtual_space.low());
   233     _index = 0;
   234     _capacity = new_capacity;
   235   } else {
   236     if (PrintGCDetails && Verbose) {
   237       // Failed to double capacity, continue;
   238       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
   239                           SIZE_FORMAT"K to " SIZE_FORMAT"K",
   240                           _capacity / K, new_capacity / K);
   241     }
   242   }
   243 }
   245 void CMMarkStack::set_should_expand() {
    246   // If we're resetting the marking state because of a
   247   // marking stack overflow, record that we should, if
   248   // possible, expand the stack.
   249   _should_expand = _cm->has_overflown();
   250 }
   252 CMMarkStack::~CMMarkStack() {
   253   if (_base != NULL) {
   254     _base = NULL;
   255     _virtual_space.release();
   256   }
   257 }
   259 void CMMarkStack::par_push(oop ptr) {
   260   while (true) {
   261     if (isFull()) {
   262       _overflow = true;
   263       return;
   264     }
   265     // Otherwise...
   266     jint index = _index;
   267     jint next_index = index+1;
   268     jint res = Atomic::cmpxchg(next_index, &_index, index);
   269     if (res == index) {
   270       _base[index] = ptr;
   271       // Note that we don't maintain this atomically.  We could, but it
   272       // doesn't seem necessary.
   273       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
   274       return;
   275     }
   276     // Otherwise, we need to try again.
   277   }
   278 }
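
par_push() is the classic claim-then-write pattern: a CAS on the index claims a slot, and only the winner stores into it. A compilable miniature with std::atomic (ParPushStack is invented; like the original, the slot store is not ordered with the index bump, so pops must be coordinated elsewhere):

  #include <atomic>
  #include <cstddef>

  template <typename T, size_t Capacity>
  struct ParPushStack {
    T data[Capacity];
    std::atomic<size_t> index{0};
    std::atomic<bool>   overflow{false};

    void par_push(T v) {
      for (;;) {
        size_t i = index.load();
        if (i >= Capacity) { overflow.store(true); return; }  // full: flag and give up
        if (index.compare_exchange_weak(i, i + 1)) {
          data[i] = v;   // slot i is exclusively ours after winning the CAS
          return;
        }
        // lost the race to another pusher; retry with the fresh index
      }
    }
  };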
   280 void CMMarkStack::par_adjoin_arr(oop* ptr_arr, int n) {
   281   while (true) {
   282     if (isFull()) {
   283       _overflow = true;
   284       return;
   285     }
   286     // Otherwise...
   287     jint index = _index;
   288     jint next_index = index + n;
   289     if (next_index > _capacity) {
   290       _overflow = true;
   291       return;
   292     }
   293     jint res = Atomic::cmpxchg(next_index, &_index, index);
   294     if (res == index) {
   295       for (int i = 0; i < n; i++) {
   296         int  ind = index + i;
   297         assert(ind < _capacity, "By overflow test above.");
   298         _base[ind] = ptr_arr[i];
   299       }
   300       NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
   301       return;
   302     }
   303     // Otherwise, we need to try again.
   304   }
   305 }
   307 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
   308   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
   309   jint start = _index;
   310   jint next_index = start + n;
   311   if (next_index > _capacity) {
   312     _overflow = true;
   313     return;
   314   }
   315   // Otherwise.
   316   _index = next_index;
   317   for (int i = 0; i < n; i++) {
   318     int ind = start + i;
   319     assert(ind < _capacity, "By overflow test above.");
   320     _base[ind] = ptr_arr[i];
   321   }
   322   NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
   323 }
   325 bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
   326   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
   327   jint index = _index;
   328   if (index == 0) {
   329     *n = 0;
   330     return false;
   331   } else {
   332     int k = MIN2(max, index);
   333     jint  new_ind = index - k;
   334     for (int j = 0; j < k; j++) {
   335       ptr_arr[j] = _base[new_ind + j];
   336     }
   337     _index = new_ind;
   338     *n = k;
   339     return true;
   340   }
   341 }
   343 template<class OopClosureClass>
   344 bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
   345   assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after
   346          || SafepointSynchronize::is_at_safepoint(),
   347          "Drain recursion must be yield-safe.");
   348   bool res = true;
   349   debug_only(_drain_in_progress = true);
   350   debug_only(_drain_in_progress_yields = yield_after);
   351   while (!isEmpty()) {
   352     oop newOop = pop();
   353     assert(G1CollectedHeap::heap()->is_in_reserved(newOop), "Bad pop");
   354     assert(newOop->is_oop(), "Expected an oop");
   355     assert(bm == NULL || bm->isMarked((HeapWord*)newOop),
   356            "only grey objects on this stack");
   357     newOop->oop_iterate(cl);
   358     if (yield_after && _cm->do_yield_check()) {
   359       res = false;
   360       break;
   361     }
   362   }
   363   debug_only(_drain_in_progress = false);
   364   return res;
   365 }
   367 void CMMarkStack::note_start_of_gc() {
   368   assert(_saved_index == -1,
   369          "note_start_of_gc()/end_of_gc() bracketed incorrectly");
   370   _saved_index = _index;
   371 }
   373 void CMMarkStack::note_end_of_gc() {
   374   // This is intentionally a guarantee, instead of an assert. If we
   375   // accidentally add something to the mark stack during GC, it
    376   // will be a correctness issue so it's better if we crash. We'll
   377   // only check this once per GC anyway, so it won't be a performance
   378   // issue in any way.
   379   guarantee(_saved_index == _index,
   380             err_msg("saved index: %d index: %d", _saved_index, _index));
   381   _saved_index = -1;
   382 }
   384 void CMMarkStack::oops_do(OopClosure* f) {
   385   assert(_saved_index == _index,
   386          err_msg("saved index: %d index: %d", _saved_index, _index));
   387   for (int i = 0; i < _index; i += 1) {
   388     f->do_oop(&_base[i]);
   389   }
   390 }
   392 bool ConcurrentMark::not_yet_marked(oop obj) const {
   393   return _g1h->is_obj_ill(obj);
   394 }
   396 CMRootRegions::CMRootRegions() :
   397   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
   398   _should_abort(false),  _next_survivor(NULL) { }
   400 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
   401   _young_list = g1h->young_list();
   402   _cm = cm;
   403 }
   405 void CMRootRegions::prepare_for_scan() {
   406   assert(!scan_in_progress(), "pre-condition");
   408   // Currently, only survivors can be root regions.
   409   assert(_next_survivor == NULL, "pre-condition");
   410   _next_survivor = _young_list->first_survivor_region();
   411   _scan_in_progress = (_next_survivor != NULL);
   412   _should_abort = false;
   413 }
   415 HeapRegion* CMRootRegions::claim_next() {
   416   if (_should_abort) {
   417     // If someone has set the should_abort flag, we return NULL to
   418     // force the caller to bail out of their loop.
   419     return NULL;
   420   }
   422   // Currently, only survivors can be root regions.
   423   HeapRegion* res = _next_survivor;
   424   if (res != NULL) {
   425     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
   426     // Read it again in case it changed while we were waiting for the lock.
   427     res = _next_survivor;
   428     if (res != NULL) {
   429       if (res == _young_list->last_survivor_region()) {
   430         // We just claimed the last survivor so store NULL to indicate
   431         // that we're done.
   432         _next_survivor = NULL;
   433       } else {
   434         _next_survivor = res->get_next_young_region();
   435       }
   436     } else {
   437       // Someone else claimed the last survivor while we were trying
   438       // to take the lock so nothing else to do.
   439     }
   440   }
   441   assert(res == NULL || res->is_survivor(), "post-condition");
   443   return res;
   444 }
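
claim_next() pairs an unsynchronized fast-path peek with a re-read under the lock, so threads arriving after the list is drained never touch the mutex. The shape in miniature, with std::mutex standing in for RootRegionScan_lock (strict C++ would want an atomic for the peek; HotSpot tolerates the benign race):

  #include <mutex>

  struct Region { Region* next; bool is_last; };

  struct RootRegionClaimer {
    Region*    _next = nullptr;   // advanced only under _lock
    std::mutex _lock;

    Region* claim_next() {
      if (_next == nullptr) return nullptr;      // cheap peek: nothing left
      std::lock_guard<std::mutex> g(_lock);
      Region* res = _next;                       // re-read under the lock
      if (res != nullptr) {
        _next = res->is_last ? nullptr : res->next;
      }
      return res;                                // nullptr tells callers to stop
    }
  };

  // Each worker self-schedules until the claimer runs dry:
  inline void worker_loop(RootRegionClaimer& c) {
    for (Region* r = c.claim_next(); r != nullptr; r = c.claim_next()) {
      // scan r's objects here
    }
  }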
   446 void CMRootRegions::scan_finished() {
   447   assert(scan_in_progress(), "pre-condition");
   449   // Currently, only survivors can be root regions.
   450   if (!_should_abort) {
   451     assert(_next_survivor == NULL, "we should have claimed all survivors");
   452   }
   453   _next_survivor = NULL;
   455   {
   456     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
   457     _scan_in_progress = false;
   458     RootRegionScan_lock->notify_all();
   459   }
   460 }
   462 bool CMRootRegions::wait_until_scan_finished() {
   463   if (!scan_in_progress()) return false;
   465   {
   466     MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
   467     while (scan_in_progress()) {
   468       RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
   469     }
   470   }
   471   return true;
   472 }
   474 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
   475 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
   476 #endif // _MSC_VER
   478 uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
   479   return MAX2((n_par_threads + 2) / 4, 1U);
   480 }
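
That is roughly one marking thread per four parallel GC threads, with a floor of one. A quick standalone check of the formula (a sketch, not the HotSpot declaration):

  #include <algorithm>
  #include <cassert>

  unsigned scale_parallel_threads(unsigned n_par_threads) {
    return std::max((n_par_threads + 2) / 4, 1u);
  }

  int main() {
    assert(scale_parallel_threads(1)  == 1);   // floor of one marking thread
    assert(scale_parallel_threads(4)  == 1);
    assert(scale_parallel_threads(8)  == 2);
    assert(scale_parallel_threads(18) == 5);
    return 0;
  }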
   482 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   483   _g1h(g1h),
   484   _markBitMap1(log2_intptr(MinObjAlignment)),
   485   _markBitMap2(log2_intptr(MinObjAlignment)),
   486   _parallel_marking_threads(0),
   487   _max_parallel_marking_threads(0),
   488   _sleep_factor(0.0),
   489   _marking_task_overhead(1.0),
   490   _cleanup_sleep_factor(0.0),
   491   _cleanup_task_overhead(1.0),
   492   _cleanup_list("Cleanup List"),
   493   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
   494   _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
   495             CardTableModRefBS::card_shift,
   496             false /* in_resource_area*/),
   498   _prevMarkBitMap(&_markBitMap1),
   499   _nextMarkBitMap(&_markBitMap2),
   501   _markStack(this),
   502   // _finger set in set_non_marking_state
   504   _max_worker_id(MAX2((uint)ParallelGCThreads, 1U)),
   505   // _active_tasks set in set_non_marking_state
   506   // _tasks set inside the constructor
   507   _task_queues(new CMTaskQueueSet((int) _max_worker_id)),
   508   _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
   510   _has_overflown(false),
   511   _concurrent(false),
   512   _has_aborted(false),
   513   _restart_for_overflow(false),
   514   _concurrent_marking_in_progress(false),
   516   // _verbose_level set below
   518   _init_times(),
   519   _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
   520   _cleanup_times(),
   521   _total_counting_time(0.0),
   522   _total_rs_scrub_time(0.0),
   524   _parallel_workers(NULL),
   526   _count_card_bitmaps(NULL),
   527   _count_marked_bytes(NULL),
   528   _completed_initialization(false) {
   529   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
   530   if (verbose_level < no_verbose) {
   531     verbose_level = no_verbose;
   532   }
   533   if (verbose_level > high_verbose) {
   534     verbose_level = high_verbose;
   535   }
   536   _verbose_level = verbose_level;
   538   if (verbose_low()) {
   539     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
   540                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
   541   }
   543   if (!_markBitMap1.allocate(heap_rs)) {
   544     warning("Failed to allocate first CM bit map");
   545     return;
   546   }
   547   if (!_markBitMap2.allocate(heap_rs)) {
   548     warning("Failed to allocate second CM bit map");
   549     return;
   550   }
   552   // Create & start a ConcurrentMark thread.
   553   _cmThread = new ConcurrentMarkThread(this);
   554   assert(cmThread() != NULL, "CM Thread should have been created");
   555   assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
   556   if (_cmThread->osthread() == NULL) {
   557       vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   558   }
   560   assert(CGC_lock != NULL, "Where's the CGC_lock?");
   561   assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
   562   assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
   564   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   565   satb_qs.set_buffer_size(G1SATBBufferSize);
   567   _root_regions.init(_g1h, this);
   569   if (ConcGCThreads > ParallelGCThreads) {
   570     warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
   571             "than ParallelGCThreads (" UINT32_FORMAT ").",
   572             ConcGCThreads, ParallelGCThreads);
   573     return;
   574   }
   575   if (ParallelGCThreads == 0) {
   576     // if we are not running with any parallel GC threads we will not
   577     // spawn any marking threads either
   578     _parallel_marking_threads =       0;
   579     _max_parallel_marking_threads =   0;
   580     _sleep_factor             =     0.0;
   581     _marking_task_overhead    =     1.0;
   582   } else {
   583     if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
   584       // Note: ConcGCThreads has precedence over G1MarkingOverheadPercent
   585       // if both are set
   586       _sleep_factor             = 0.0;
   587       _marking_task_overhead    = 1.0;
   588     } else if (G1MarkingOverheadPercent > 0) {
   589       // We will calculate the number of parallel marking threads based
   590       // on a target overhead with respect to the soft real-time goal
   591       double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
   592       double overall_cm_overhead =
   593         (double) MaxGCPauseMillis * marking_overhead /
   594         (double) GCPauseIntervalMillis;
   595       double cpu_ratio = 1.0 / (double) os::processor_count();
   596       double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
   597       double marking_task_overhead =
   598         overall_cm_overhead / marking_thread_num *
   599                                                 (double) os::processor_count();
   600       double sleep_factor =
   601                          (1.0 - marking_task_overhead) / marking_task_overhead;
   603       FLAG_SET_ERGO(uintx, ConcGCThreads, (uint) marking_thread_num);
   604       _sleep_factor             = sleep_factor;
   605       _marking_task_overhead    = marking_task_overhead;
   606     } else {
   607       // Calculate the number of parallel marking threads by scaling
   608       // the number of parallel GC threads.
   609       uint marking_thread_num = scale_parallel_threads((uint) ParallelGCThreads);
   610       FLAG_SET_ERGO(uintx, ConcGCThreads, marking_thread_num);
   611       _sleep_factor             = 0.0;
   612       _marking_task_overhead    = 1.0;
   613     }
   615     assert(ConcGCThreads > 0, "Should have been set");
   616     _parallel_marking_threads = (uint) ConcGCThreads;
   617     _max_parallel_marking_threads = _parallel_marking_threads;
   619     if (parallel_marking_threads() > 1) {
   620       _cleanup_task_overhead = 1.0;
   621     } else {
   622       _cleanup_task_overhead = marking_task_overhead();
   623     }
   624     _cleanup_sleep_factor =
   625                      (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
   627 #if 0
   628     gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
   629     gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
   630     gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
   631     gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
   632     gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
   633 #endif
   635     guarantee(parallel_marking_threads() > 0, "peace of mind");
   636     _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
   637          _max_parallel_marking_threads, false, true);
   638     if (_parallel_workers == NULL) {
   639       vm_exit_during_initialization("Failed necessary allocation.");
   640     } else {
   641       _parallel_workers->initialize_workers();
   642     }
   643   }
   645   if (FLAG_IS_DEFAULT(MarkStackSize)) {
   646     uintx mark_stack_size =
   647       MIN2(MarkStackSizeMax,
   648           MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
   649     // Verify that the calculated value for MarkStackSize is in range.
   650     // It would be nice to use the private utility routine from Arguments.
   651     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
   652       warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
   653               "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
   654               mark_stack_size, 1, MarkStackSizeMax);
   655       return;
   656     }
   657     FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
   658   } else {
   659     // Verify MarkStackSize is in range.
   660     if (FLAG_IS_CMDLINE(MarkStackSize)) {
   661       if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
   662         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
   663           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
   664                   "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
   665                   MarkStackSize, 1, MarkStackSizeMax);
   666           return;
   667         }
   668       } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
   669         if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
   670           warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
   671                   " or for MarkStackSizeMax (" UINTX_FORMAT ")",
   672                   MarkStackSize, MarkStackSizeMax);
   673           return;
   674         }
   675       }
   676     }
   677   }
   679   if (!_markStack.allocate(MarkStackSize)) {
   680     warning("Failed to allocate CM marking stack");
   681     return;
   682   }
   684   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
   685   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
   687   _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
   688   _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
   690   BitMap::idx_t card_bm_size = _card_bm.size();
   692   // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
   693   _active_tasks = _max_worker_id;
   695   size_t max_regions = (size_t) _g1h->max_regions();
   696   for (uint i = 0; i < _max_worker_id; ++i) {
   697     CMTaskQueue* task_queue = new CMTaskQueue();
   698     task_queue->initialize();
   699     _task_queues->register_queue(i, task_queue);
   701     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
   702     _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
   704     _tasks[i] = new CMTask(i, this,
   705                            _count_marked_bytes[i],
   706                            &_count_card_bitmaps[i],
   707                            task_queue, _task_queues);
   709     _accum_task_vtime[i] = 0.0;
   710   }
   712   // Calculate the card number for the bottom of the heap. Used
   713   // in biasing indexes into the accounting card bitmaps.
   714   _heap_bottom_card_num =
   715     intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
   716                                 CardTableModRefBS::card_shift);
   718   // Clear all the liveness counting data
   719   clear_all_count_data();
   721   // so that the call below can read a sensible value
   722   _heap_start = (HeapWord*) heap_rs.base();
   723   set_non_marking_state();
   724   _completed_initialization = true;
   725 }
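
To make the G1MarkingOverheadPercent arithmetic in the constructor concrete, here is the same computation with illustrative inputs: a 200 ms pause target, 1000 ms interval, 10% marking overhead, and 8 processors (example values, not defaults):

  #include <cmath>
  #include <cstdio>

  int main() {
    double max_pause_ms      = 200.0;   // MaxGCPauseMillis (illustrative)
    double pause_interval_ms = 1000.0;  // GCPauseIntervalMillis (illustrative)
    double overhead_pct      = 10.0;    // G1MarkingOverheadPercent (illustrative)
    int    processors        = 8;

    double marking_overhead    = overhead_pct / 100.0;                        // 0.10
    double overall_cm_overhead = max_pause_ms * marking_overhead
                                 / pause_interval_ms;                         // 0.02
    double cpu_ratio           = 1.0 / processors;                            // 0.125
    double threads             = std::ceil(overall_cm_overhead / cpu_ratio);  // 1
    double task_overhead       = overall_cm_overhead / threads * processors;  // 0.16
    double sleep_factor        = (1.0 - task_overhead) / task_overhead;       // 5.25

    std::printf("threads=%.0f overhead=%.2f sleep_factor=%.2f\n",
                threads, task_overhead, sleep_factor);
    return 0;
  }

So with these inputs: one marking thread that sleeps 5.25x as long as it runs, holding concurrent marking near the 2% CPU target.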
   727 void ConcurrentMark::update_g1_committed(bool force) {
   728   // If concurrent marking is not in progress, then we do not need to
   729   // update _heap_end.
   730   if (!concurrent_marking_in_progress() && !force) return;
   732   MemRegion committed = _g1h->g1_committed();
   733   assert(committed.start() == _heap_start, "start shouldn't change");
   734   HeapWord* new_end = committed.end();
   735   if (new_end > _heap_end) {
   736     // The heap has been expanded.
   738     _heap_end = new_end;
   739   }
   740   // Notice that the heap can also shrink. However, this only happens
   741   // during a Full GC (at least currently) and the entire marking
   742   // phase will bail out and the task will not be restarted. So, let's
   743   // do nothing.
   744 }
   746 void ConcurrentMark::reset() {
   747   // Starting values for these two. This should be called in a STW
    748   // phase. CM will be notified of any future g1_committed expansions;
    749   // these will occur at the end of evacuation pauses, when tasks are
    750   // inactive.
   751   MemRegion committed = _g1h->g1_committed();
   752   _heap_start = committed.start();
   753   _heap_end   = committed.end();
   755   // Separated the asserts so that we know which one fires.
   756   assert(_heap_start != NULL, "heap bounds should look ok");
   757   assert(_heap_end != NULL, "heap bounds should look ok");
   758   assert(_heap_start < _heap_end, "heap bounds should look ok");
   760   // Reset all the marking data structures and any necessary flags
   761   reset_marking_state();
   763   if (verbose_low()) {
   764     gclog_or_tty->print_cr("[global] resetting");
   765   }
    767   // We do reset all of them, since different phases will use a
    768   // different number of active threads. So, it's easiest to have all
   769   // of them ready.
   770   for (uint i = 0; i < _max_worker_id; ++i) {
   771     _tasks[i]->reset(_nextMarkBitMap);
   772   }
   774   // we need this to make sure that the flag is on during the evac
   775   // pause with initial mark piggy-backed
   776   set_concurrent_marking_in_progress();
   777 }
   780 void ConcurrentMark::reset_marking_state(bool clear_overflow) {
   781   _markStack.set_should_expand();
   782   _markStack.setEmpty();        // Also clears the _markStack overflow flag
   783   if (clear_overflow) {
   784     clear_has_overflown();
   785   } else {
   786     assert(has_overflown(), "pre-condition");
   787   }
   788   _finger = _heap_start;
   790   for (uint i = 0; i < _max_worker_id; ++i) {
   791     CMTaskQueue* queue = _task_queues->queue(i);
   792     queue->set_empty();
   793   }
   794 }
   796 void ConcurrentMark::set_concurrency(uint active_tasks) {
   797   assert(active_tasks <= _max_worker_id, "we should not have more");
   799   _active_tasks = active_tasks;
   800   // Need to update the three data structures below according to the
   801   // number of active threads for this phase.
   802   _terminator   = ParallelTaskTerminator((int) active_tasks, _task_queues);
   803   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
   804   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
   805 }
   807 void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
   808   set_concurrency(active_tasks);
   810   _concurrent = concurrent;
   811   // We propagate this to all tasks, not just the active ones.
   812   for (uint i = 0; i < _max_worker_id; ++i)
   813     _tasks[i]->set_concurrent(concurrent);
   815   if (concurrent) {
   816     set_concurrent_marking_in_progress();
   817   } else {
   818     // We currently assume that the concurrent flag has been set to
   819     // false before we start remark. At this point we should also be
   820     // in a STW phase.
   821     assert(!concurrent_marking_in_progress(), "invariant");
   822     assert(_finger == _heap_end,
   823            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
   824                    _finger, _heap_end));
   825     update_g1_committed(true);
   826   }
   827 }
   829 void ConcurrentMark::set_non_marking_state() {
   830   // We set the global marking state to some default values when we're
   831   // not doing marking.
   832   reset_marking_state();
   833   _active_tasks = 0;
   834   clear_concurrent_marking_in_progress();
   835 }
   837 ConcurrentMark::~ConcurrentMark() {
   838   // The ConcurrentMark instance is never freed.
   839   ShouldNotReachHere();
   840 }
   842 void ConcurrentMark::clearNextBitmap() {
   843   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   844   G1CollectorPolicy* g1p = g1h->g1_policy();
    846   // Make sure that the concurrent mark thread still appears to be in
    847   // the current cycle.
   848   guarantee(cmThread()->during_cycle(), "invariant");
   850   // We are finishing up the current cycle by clearing the next
   851   // marking bitmap and getting it ready for the next cycle. During
   852   // this time no other cycle can start. So, let's make sure that this
   853   // is the case.
   854   guarantee(!g1h->mark_in_progress(), "invariant");
   856   // clear the mark bitmap (no grey objects to start with).
   857   // We need to do this in chunks and offer to yield in between
   858   // each chunk.
   859   HeapWord* start  = _nextMarkBitMap->startWord();
   860   HeapWord* end    = _nextMarkBitMap->endWord();
   861   HeapWord* cur    = start;
   862   size_t chunkSize = M;
   863   while (cur < end) {
   864     HeapWord* next = cur + chunkSize;
   865     if (next > end) {
   866       next = end;
   867     }
   868     MemRegion mr(cur,next);
   869     _nextMarkBitMap->clearRange(mr);
   870     cur = next;
   871     do_yield_check();
   873     // Repeat the asserts from above. We'll do them as asserts here to
   874     // minimize their overhead on the product. However, we'll have
   875     // them as guarantees at the beginning / end of the bitmap
   876     // clearing to get some checking in the product.
   877     assert(cmThread()->during_cycle(), "invariant");
   878     assert(!g1h->mark_in_progress(), "invariant");
   879   }
   881   // Clear the liveness counting data
   882   clear_all_count_data();
   884   // Repeat the asserts from above.
   885   guarantee(cmThread()->during_cycle(), "invariant");
   886   guarantee(!g1h->mark_in_progress(), "invariant");
   887 }
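
The chunk-and-yield loop in clearNextBitmap() bounds how much clearing work happens between safepoint checks. Reduced to a standalone sketch (yield_check() is a hypothetical stand-in for do_yield_check()):

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  inline void yield_check() { /* cooperate with safepoint requests here */ }

  void clear_in_chunks(std::vector<bool>& bm, size_t chunk_bits) {
    for (size_t cur = 0; cur < bm.size(); ) {
      size_t next = std::min(cur + chunk_bits, bm.size());
      std::fill(bm.begin() + cur, bm.begin() + next, false);
      cur = next;
      yield_check();  // at most one chunk of work between yield opportunities
    }
  }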
   889 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
   890 public:
   891   bool doHeapRegion(HeapRegion* r) {
   892     if (!r->continuesHumongous()) {
   893       r->note_start_of_marking();
   894     }
   895     return false;
   896   }
   897 };
   899 void ConcurrentMark::checkpointRootsInitialPre() {
   900   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
   901   G1CollectorPolicy* g1p = g1h->g1_policy();
   903   _has_aborted = false;
   905 #ifndef PRODUCT
   906   if (G1PrintReachableAtInitialMark) {
   907     print_reachable("at-cycle-start",
   908                     VerifyOption_G1UsePrevMarking, true /* all */);
   909   }
   910 #endif
   912   // Initialise marking structures. This has to be done in a STW phase.
   913   reset();
   915   // For each region note start of marking.
   916   NoteStartOfMarkHRClosure startcl;
   917   g1h->heap_region_iterate(&startcl);
   918 }
   921 void ConcurrentMark::checkpointRootsInitialPost() {
   922   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
   924   // If we force an overflow during remark, the remark operation will
   925   // actually abort and we'll restart concurrent marking. If we always
    926   // force an overflow during remark we'll never actually complete the
    927   // marking phase. So, we initialize this here, at the start of the
    928   // cycle, so that the remaining overflow number will decrease at
    929   // every remark and we'll eventually not need to force one.
   930   force_overflow_stw()->init();
   932   // Start Concurrent Marking weak-reference discovery.
   933   ReferenceProcessor* rp = g1h->ref_processor_cm();
   934   // enable ("weak") refs discovery
   935   rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   936   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
   938   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    939   // This is the start of the marking cycle; we expect all
    940   // threads to have SATB queues with active set to false.
   941   satb_mq_set.set_active_all_threads(true, /* new active value */
   942                                      false /* expected_active */);
   944   _root_regions.prepare_for_scan();
   946   // update_g1_committed() will be called at the end of an evac pause
   947   // when marking is on. So, it's also called at the end of the
   948   // initial-mark pause to update the heap end, if the heap expands
   949   // during it. No need to call it here.
   950 }
   952 /*
   953  * Notice that in the next two methods, we actually leave the STS
   954  * during the barrier sync and join it immediately afterwards. If we
   955  * do not do this, the following deadlock can occur: one thread could
   956  * be in the barrier sync code, waiting for the other thread to also
   957  * sync up, whereas another one could be trying to yield, while also
   958  * waiting for the other threads to sync up too.
   959  *
   960  * Note, however, that this code is also used during remark and in
   961  * this case we should not attempt to leave / enter the STS, otherwise
    962  * we'll either hit an assert (debug / fastdebug) or deadlock
   963  * (product). So we should only leave / enter the STS if we are
   964  * operating concurrently.
   965  *
   966  * Because the thread that does the sync barrier has left the STS, it
    967  * is possible for it to be suspended while a Full GC or an evacuation
    968  * pause occurs. This is actually safe, since entering the sync
   969  * barrier is one of the last things do_marking_step() does, and it
   970  * doesn't manipulate any data structures afterwards.
   971  */
   973 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
   974   if (verbose_low()) {
   975     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
   976   }
   978   if (concurrent()) {
   979     ConcurrentGCThread::stsLeave();
   980   }
   981   _first_overflow_barrier_sync.enter();
   982   if (concurrent()) {
   983     ConcurrentGCThread::stsJoin();
   984   }
   985   // at this point everyone should have synced up and not be doing any
   986   // more work
   988   if (verbose_low()) {
   989     gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
   990   }
   992   // If we're executing the concurrent phase of marking, reset the marking
   993   // state; otherwise the marking state is reset after reference processing,
   994   // during the remark pause.
   995   // If we reset here as a result of an overflow during the remark we will
   996   // see assertion failures from any subsequent set_concurrency_and_phase()
   997   // calls.
   998   if (concurrent()) {
    999     // let the task associated with worker 0 do this
  1000     if (worker_id == 0) {
  1001       // task 0 is responsible for clearing the global data structures
  1002       // We should be here because of an overflow. During STW we should
  1003       // not clear the overflow flag since we rely on it being true when
   1004     // we exit this method to abort the pause and restart concurrent
  1005       // marking.
  1006       reset_marking_state(true /* clear_overflow */);
  1007       force_overflow()->update();
  1009       if (G1Log::fine()) {
  1010         gclog_or_tty->date_stamp(PrintGCDateStamps);
  1011         gclog_or_tty->stamp(PrintGCTimeStamps);
   1012         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
   1013       }
   1014     }
   1015   }
   1017   // after this, each task should reset its own data structures and
   1018   // then go into the second barrier
   1019 }
  1021 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
  1022   if (verbose_low()) {
   1023     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
   1024   }
   1026   if (concurrent()) {
   1027     ConcurrentGCThread::stsLeave();
   1028   }
   1029   _second_overflow_barrier_sync.enter();
   1030   if (concurrent()) {
   1031     ConcurrentGCThread::stsJoin();
   1032   }
   1033   // at this point everything should be re-initialized and ready to go
   1035   if (verbose_low()) {
   1036     gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
   1037   }
   1038 }
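
The leave-sync-rejoin discipline described in the block comment before enter_first_sync_barrier(), in miniature: std::barrier (C++20) stands in for the overflow barrier syncs, and the STS calls are no-op stand-ins for ConcurrentGCThread::stsLeave()/stsJoin():

  #include <barrier>

  inline void stsLeave() { /* leave the suspendible thread set */ }
  inline void stsJoin()  { /* rejoin it once everyone has synced */ }

  std::barrier<> overflow_barrier(4);  // worker count; CM resets this per phase

  void enter_sync_barrier(bool concurrent) {
    if (concurrent) stsLeave();          // don't hold up a safepoint while parked
    overflow_barrier.arrive_and_wait();  // all active workers sync up here
    if (concurrent) stsJoin();
  }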
  1040 #ifndef PRODUCT
  1041 void ForceOverflowSettings::init() {
  1042   _num_remaining = G1ConcMarkForceOverflow;
  1043   _force = false;
   1044   update();
   1045 }
   1047 void ForceOverflowSettings::update() {
   1048   if (_num_remaining > 0) {
   1049     _num_remaining -= 1;
   1050     _force = true;
   1051   } else {
   1052     _force = false;
   1053   }
   1054 }
   1056 bool ForceOverflowSettings::should_force() {
   1057   if (_force) {
   1058     _force = false;
   1059     return true;
   1060   } else {
   1061     return false;
   1062   }
   1063 }
  1064 #endif // !PRODUCT
  1066 class CMConcurrentMarkingTask: public AbstractGangTask {
  1067 private:
  1068   ConcurrentMark*       _cm;
  1069   ConcurrentMarkThread* _cmt;
  1071 public:
  1072   void work(uint worker_id) {
  1073     assert(Thread::current()->is_ConcurrentGC_thread(),
  1074            "this should only be done by a conc GC thread");
  1075     ResourceMark rm;
  1077     double start_vtime = os::elapsedVTime();
  1079     ConcurrentGCThread::stsJoin();
  1081     assert(worker_id < _cm->active_tasks(), "invariant");
  1082     CMTask* the_task = _cm->task(worker_id);
  1083     the_task->record_start_time();
  1084     if (!_cm->has_aborted()) {
  1085       do {
  1086         double start_vtime_sec = os::elapsedVTime();
  1087         double start_time_sec = os::elapsedTime();
  1088         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
  1090         the_task->do_marking_step(mark_step_duration_ms,
  1091                                   true  /* do_termination */,
  1092                                   false /* is_serial*/);
  1094         double end_time_sec = os::elapsedTime();
  1095         double end_vtime_sec = os::elapsedVTime();
  1096         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
  1097         double elapsed_time_sec = end_time_sec - start_time_sec;
  1098         _cm->clear_has_overflown();
  1100         bool ret = _cm->do_yield_check(worker_id);
  1102         jlong sleep_time_ms;
  1103         if (!_cm->has_aborted() && the_task->has_aborted()) {
  1104           sleep_time_ms =
  1105             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
  1106           ConcurrentGCThread::stsLeave();
  1107           os::sleep(Thread::current(), sleep_time_ms, false);
   1108           ConcurrentGCThread::stsJoin();
   1109         }
  1110         double end_time2_sec = os::elapsedTime();
  1111         double elapsed_time2_sec = end_time2_sec - start_time_sec;
  1113 #if 0
  1114           gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
  1115                                  "overhead %1.4lf",
  1116                                  elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
  1117                                  the_task->conc_overhead(os::elapsedTime()) * 8.0);
  1118           gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
  1119                                  elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
  1120 #endif
   1121       } while (!_cm->has_aborted() && the_task->has_aborted());
   1122     }
   1123     the_task->record_end_time();
  1124     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
  1126     ConcurrentGCThread::stsLeave();
  1128     double end_vtime = os::elapsedVTime();
   1129     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
   1130   }
  1132   CMConcurrentMarkingTask(ConcurrentMark* cm,
  1133                           ConcurrentMarkThread* cmt) :
  1134       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
  1136   ~CMConcurrentMarkingTask() { }
  1137 };
  1139 // Calculates the number of active workers for a concurrent
  1140 // phase.
  1141 uint ConcurrentMark::calc_parallel_marking_threads() {
  1142   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1143     uint n_conc_workers = 0;
  1144     if (!UseDynamicNumberOfGCThreads ||
  1145         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
  1146          !ForceDynamicNumberOfGCThreads)) {
  1147       n_conc_workers = max_parallel_marking_threads();
  1148     } else {
  1149       n_conc_workers =
  1150         AdaptiveSizePolicy::calc_default_active_workers(
  1151                                      max_parallel_marking_threads(),
  1152                                      1, /* Minimum workers */
  1153                                      parallel_marking_threads(),
  1154                                      Threads::number_of_non_daemon_threads());
  1155       // Don't scale down "n_conc_workers" by scale_parallel_threads() because
   1156       // that scaling has already gone into "_max_parallel_marking_threads".
   1157     }
   1158     assert(n_conc_workers > 0, "Always need at least 1");
   1159     return n_conc_workers;
   1160   }
   1161   // If we are not running with any parallel GC threads we will not
   1162   // have spawned any marking threads either. Hence the number of
   1163   // concurrent workers should be 0.
   1164   return 0;
   1165 }
  1167 void ConcurrentMark::scanRootRegion(HeapRegion* hr, uint worker_id) {
  1168   // Currently, only survivors can be root regions.
  1169   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  1170   G1RootRegionScanClosure cl(_g1h, this, worker_id);
  1172   const uintx interval = PrefetchScanIntervalInBytes;
  1173   HeapWord* curr = hr->bottom();
  1174   const HeapWord* end = hr->top();
  1175   while (curr < end) {
  1176     Prefetch::read(curr, interval);
  1177     oop obj = oop(curr);
  1178     int size = obj->oop_iterate(&cl);
  1179     assert(size == obj->size(), "sanity");
   1180     curr += size;
   1181   }
   1182 }
  1184 class CMRootRegionScanTask : public AbstractGangTask {
  1185 private:
  1186   ConcurrentMark* _cm;
  1188 public:
  1189   CMRootRegionScanTask(ConcurrentMark* cm) :
  1190     AbstractGangTask("Root Region Scan"), _cm(cm) { }
  1192   void work(uint worker_id) {
  1193     assert(Thread::current()->is_ConcurrentGC_thread(),
  1194            "this should only be done by a conc GC thread");
  1196     CMRootRegions* root_regions = _cm->root_regions();
  1197     HeapRegion* hr = root_regions->claim_next();
  1198     while (hr != NULL) {
  1199       _cm->scanRootRegion(hr, worker_id);
   1200       hr = root_regions->claim_next();
   1201     }
   1202   }
  1203 };
  1205 void ConcurrentMark::scanRootRegions() {
  1206   // scan_in_progress() will have been set to true only if there was
  1207   // at least one root region to scan. So, if it's false, we
  1208   // should not attempt to do any further work.
  1209   if (root_regions()->scan_in_progress()) {
  1210     _parallel_marking_threads = calc_parallel_marking_threads();
  1211     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
  1212            "Maximum number of marking threads exceeded");
  1213     uint active_workers = MAX2(1U, parallel_marking_threads());
  1215     CMRootRegionScanTask task(this);
  1216     if (use_parallel_marking_threads()) {
  1217       _parallel_workers->set_active_workers((int) active_workers);
  1218       _parallel_workers->run_task(&task);
  1219     } else {
   1220       task.work(0);
   1221     }
  1223     // It's possible that has_aborted() is true here without actually
  1224     // aborting the survivor scan earlier. This is OK as it's
  1225     // mainly used for sanity checking.
   1226     root_regions()->scan_finished();
   1227   }
   1228 }
  1230 void ConcurrentMark::markFromRoots() {
  1231   // we might be tempted to assert that:
  1232   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  1233   //        "inconsistent argument?");
  1234   // However that wouldn't be right, because it's possible that
  1235   // a safepoint is indeed in progress as a younger generation
  1236   // stop-the-world GC happens even as we mark in this generation.
  1238   _restart_for_overflow = false;
  1239   force_overflow_conc()->init();
  1241   // _g1h has _n_par_threads
  1242   _parallel_marking_threads = calc_parallel_marking_threads();
  1243   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
  1244     "Maximum number of marking threads exceeded");
  1246   uint active_workers = MAX2(1U, parallel_marking_threads());
  1248   // Parallel task terminator is set in "set_concurrency_and_phase()"
  1249   set_concurrency_and_phase(active_workers, true /* concurrent */);
  1251   CMConcurrentMarkingTask markingTask(this, cmThread());
  1252   if (use_parallel_marking_threads()) {
  1253     _parallel_workers->set_active_workers((int)active_workers);
   1254     // Don't set _n_par_threads because it affects MT in process_strong_roots()
   1255     // and the decisions on that MT processing are made elsewhere.
  1256     assert(_parallel_workers->active_workers() > 0, "Should have been set");
  1257     _parallel_workers->run_task(&markingTask);
  1258   } else {
   1259     markingTask.work(0);
   1260   }
   1261   print_stats();
   1262 }
  1264 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
  1265   // world is stopped at this checkpoint
  1266   assert(SafepointSynchronize::is_at_safepoint(),
  1267          "world should be stopped");
  1269   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1271   // If a full collection has happened, we shouldn't do this.
  1272   if (has_aborted()) {
  1273     g1h->set_marking_complete(); // So bitmap clearing isn't confused
   1274     return;
   1275   }
  1277   SvcGCMarker sgcm(SvcGCMarker::OTHER);
  1279   if (VerifyDuringGC) {
  1280     HandleMark hm;  // handle scope
  1281     Universe::heap()->prepare_for_verify();
  1282     Universe::verify(VerifyOption_G1UsePrevMarking,
   1283                      " VerifyDuringGC:(before)");
   1284   }
  1286   G1CollectorPolicy* g1p = g1h->g1_policy();
  1287   g1p->record_concurrent_mark_remark_start();
  1289   double start = os::elapsedTime();
  1291   checkpointRootsFinalWork();
  1293   double mark_work_end = os::elapsedTime();
  1295   weakRefsWork(clear_all_soft_refs);
  1297   if (has_overflown()) {
  1298     // Oops.  We overflowed.  Restart concurrent marking.
  1299     _restart_for_overflow = true;
  1300     if (G1TraceMarkStackOverflow) {
   1301       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
   1302     }
  1304     // Verify the heap w.r.t. the previous marking bitmap.
  1305     if (VerifyDuringGC) {
  1306       HandleMark hm;  // handle scope
  1307       Universe::heap()->prepare_for_verify();
  1308       Universe::verify(VerifyOption_G1UsePrevMarking,
   1309                        " VerifyDuringGC:(overflow)");
   1310     }
  1312     // Clear the marking state because we will be restarting
  1313     // marking due to overflowing the global mark stack.
  1314     reset_marking_state();
  1315   } else {
  1316     // Aggregate the per-task counting data that we have accumulated
  1317     // while marking.
  1318     aggregate_count_data();
  1320     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  1321     // We're done with marking.
   1322     // This is the end of the marking cycle; we expect all
   1323     // threads to have SATB queues with active set to true.
  1324     satb_mq_set.set_active_all_threads(false, /* new active value */
  1325                                        true /* expected_active */);
  1327     if (VerifyDuringGC) {
  1328       HandleMark hm;  // handle scope
  1329       Universe::heap()->prepare_for_verify();
  1330       Universe::verify(VerifyOption_G1UseNextMarking,
   1331                        " VerifyDuringGC:(after)");
   1332     }
  1333     assert(!restart_for_overflow(), "sanity");
  1334     // Completely reset the marking state since marking completed
   1335     set_non_marking_state();
   1336   }
  1338   // Expand the marking stack, if we have to and if we can.
  1339   if (_markStack.should_expand()) {
   1340     _markStack.expand();
   1341   }
  1343   // Statistics
  1344   double now = os::elapsedTime();
  1345   _remark_mark_times.add((mark_work_end - start) * 1000.0);
  1346   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
  1347   _remark_times.add((now - start) * 1000.0);
  1349   g1p->record_concurrent_mark_remark_end();
  1351   G1CMIsAliveClosure is_alive(g1h);
   1352   g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
   1353 }
  1355 // Base class of the closures that finalize and verify the
  1356 // liveness counting data.
  1357 class CMCountDataClosureBase: public HeapRegionClosure {
  1358 protected:
  1359   G1CollectedHeap* _g1h;
  1360   ConcurrentMark* _cm;
  1361   CardTableModRefBS* _ct_bs;
  1363   BitMap* _region_bm;
  1364   BitMap* _card_bm;
   1366   // Takes a region that's not empty (i.e., it has at least one
   1367   // live object in it) and sets its corresponding bit on the region
  1368   // bitmap to 1. If the region is "starts humongous" it will also set
  1369   // to 1 the bits on the region bitmap that correspond to its
  1370   // associated "continues humongous" regions.
  1371   void set_bit_for_region(HeapRegion* hr) {
  1372     assert(!hr->continuesHumongous(), "should have filtered those out");
  1374     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
  1375     if (!hr->startsHumongous()) {
  1376       // Normal (non-humongous) case: just set the bit.
  1377       _region_bm->par_at_put(index, true);
  1378     } else {
  1379       // Starts humongous case: calculate how many regions are part of
  1380       // this humongous region and then set the bit range.
  1381       BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
   1382       _region_bm->par_at_put_range(index, end_index, true);
   1383     }
   1384   }
  1386 public:
  1387   CMCountDataClosureBase(G1CollectedHeap* g1h,
  1388                          BitMap* region_bm, BitMap* card_bm):
  1389     _g1h(g1h), _cm(g1h->concurrent_mark()),
  1390     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
  1391     _region_bm(region_bm), _card_bm(card_bm) { }
  1392 };
  1394 // Closure that calculates the # live objects per region. Used
  1395 // for verification purposes during the cleanup pause.
  1396 class CalcLiveObjectsClosure: public CMCountDataClosureBase {
  1397   CMBitMapRO* _bm;
  1398   size_t _region_marked_bytes;
  1400 public:
  1401   CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
  1402                          BitMap* region_bm, BitMap* card_bm) :
  1403     CMCountDataClosureBase(g1h, region_bm, card_bm),
  1404     _bm(bm), _region_marked_bytes(0) { }
  1406   bool doHeapRegion(HeapRegion* hr) {
  1408     if (hr->continuesHumongous()) {
  1409       // We will ignore these here and process them when their
  1410       // associated "starts humongous" region is processed (see
  1411       // set_bit_for_region()). Note that we cannot rely on their
  1412       // associated "starts humongous" region to have its bit set to
  1413       // 1 since, due to the region chunking in the parallel region
  1414       // iteration, a "continues humongous" region might be visited
  1415       // before its associated "starts humongous".
  1416       return false;
  1419     HeapWord* ntams = hr->next_top_at_mark_start();
  1420     HeapWord* start = hr->bottom();
  1422     assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
  1423            err_msg("Preconditions not met - "
  1424                    "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
  1425                    start, ntams, hr->end()));
  1427     // Find the first marked object at or after "start".
  1428     start = _bm->getNextMarkedWordAddress(start, ntams);
  1430     size_t marked_bytes = 0;
  1432     while (start < ntams) {
  1433       oop obj = oop(start);
  1434       int obj_sz = obj->size();
  1435       HeapWord* obj_end = start + obj_sz;
  1437       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
  1438       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
  1440       // Note: if we're looking at the last region in the heap, obj_end
  1441       // could actually be just beyond the end of the heap; end_idx
  1442       // will then correspond to a (non-existent) card that is also
  1443       // just beyond the heap.
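             // Worked example with hypothetical numbers (assuming the usual
             // 512-byte cards): an object spanning
             // [heap_start + 0x1000, heap_start + 0x1300) gives
             // start_idx = 0x1000 / 512 = 8 and end_idx = 0x1300 / 512 = 9;
             // 0x1300 is not card aligned, so end_idx is bumped to 10 below
             // and the range [8, 10) - both cards the object touches - is set.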
  1444       if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
  1445         // end of object is not card aligned - increment to cover
  1446         // all the cards spanned by the object
  1447         end_idx += 1;
  1450       // Set the bits in the card BM for the cards spanned by this object.
  1451       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
  1453       // Add the size of this object to the number of marked bytes.
  1454       marked_bytes += (size_t)obj_sz * HeapWordSize;
  1456       // Find the next marked object after this one.
  1457       start = _bm->getNextMarkedWordAddress(obj_end, ntams);
  1460     // Mark the allocated-since-marking portion...
  1461     HeapWord* top = hr->top();
  1462     if (ntams < top) {
  1463       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
  1464       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
  1466       // Note: if we're looking at the last region in the heap, top
  1467       // could actually be just beyond the end of the heap; end_idx
  1468       // will then correspond to a (non-existent) card that is also
  1469       // just beyond the heap.
  1470       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
  1471         // end of object is not card aligned - increment to cover
  1472         // all the cards spanned by the object
  1473         end_idx += 1;
  1475       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
  1477       // This definitely means the region has live objects.
  1478       set_bit_for_region(hr);
  1481     // Update the live region bitmap.
  1482     if (marked_bytes > 0) {
  1483       set_bit_for_region(hr);
  1486     // Set the marked bytes for the current region so that
  1487     // it can be queried by a calling verification routine.
  1488     _region_marked_bytes = marked_bytes;
  1490     return false;
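           // Returning false tells the region iteration to continue with the
           // remaining heap regions.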
  1493   size_t region_marked_bytes() const { return _region_marked_bytes; }
  1494 };
  1496 // Heap region closure used for verifying the counting data
  1497 // that was accumulated concurrently and aggregated during
  1498 // the remark pause. This closure is applied to the heap
  1499 // regions during the STW cleanup pause.
  1501 class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
  1502   G1CollectedHeap* _g1h;
  1503   ConcurrentMark* _cm;
  1504   CalcLiveObjectsClosure _calc_cl;
  1505   BitMap* _region_bm;   // Region BM to be verified
  1506   BitMap* _card_bm;     // Card BM to be verified
  1507   bool _verbose;        // verbose output?
  1509   BitMap* _exp_region_bm; // Expected Region BM values
  1510   BitMap* _exp_card_bm;   // Expected card BM values
  1512   int _failures;
  1514 public:
  1515   VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
  1516                                 BitMap* region_bm,
  1517                                 BitMap* card_bm,
  1518                                 BitMap* exp_region_bm,
  1519                                 BitMap* exp_card_bm,
  1520                                 bool verbose) :
  1521     _g1h(g1h), _cm(g1h->concurrent_mark()),
  1522     _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
  1523     _region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
  1524     _exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
  1525     _failures(0) { }
  1527   int failures() const { return _failures; }
  1529   bool doHeapRegion(HeapRegion* hr) {
  1530     if (hr->continuesHumongous()) {
  1531       // We will ignore these here and process them when their
  1532       // associated "starts humongous" region is processed (see
  1533       // set_bit_for_region()). Note that we cannot rely on their
  1534       // associated "starts humongous" region to have its bit set to
  1535       // 1 since, due to the region chunking in the parallel region
  1536       // iteration, a "continues humongous" region might be visited
  1537       // before its associated "starts humongous".
  1538       return false;
  1541     int failures = 0;
  1543     // Call the CalcLiveObjectsClosure to walk the marking bitmap for
  1544     // this region and set the corresponding bits in the expected region
  1545     // and card bitmaps.
  1546     bool res = _calc_cl.doHeapRegion(hr);
  1547     assert(res == false, "should be continuing");
  1549     MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
  1550                     Mutex::_no_safepoint_check_flag);
  1552     // Verify the marked bytes for this region.
  1553     size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
  1554     size_t act_marked_bytes = hr->next_marked_bytes();
  1556     // We're not OK if expected marked bytes > actual marked bytes. It means
  1557     // we have missed accounting for some objects during the actual marking.
  1558     if (exp_marked_bytes > act_marked_bytes) {
  1559       if (_verbose) {
  1560         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
  1561                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
  1562                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
  1564       failures += 1;
  1567     // Verify this region's bit in the actual region bitmap and in the
  1568     // expected region bitmap (which was just calculated).
  1569     // We're not OK if the bit in the calculated expected region
  1570     // bitmap is set and the bit in the actual region bitmap is not.
  1571     BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
  1573     bool expected = _exp_region_bm->at(index);
  1574     bool actual = _region_bm->at(index);
  1575     if (expected && !actual) {
  1576       if (_verbose) {
  1577         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
  1578                                "expected: %s, actual: %s",
  1579                                hr->hrs_index(),
  1580                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
  1582       failures += 1;
  1585     // Verify that the card bit maps for the cards spanned by the current
  1586     // region match. We have an error if we have a set bit in the expected
  1587     // bit map and the corresponding bit in the actual bitmap is not set.
  1589     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(hr->bottom());
  1590     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(hr->top());
  1592     for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
  1593       expected = _exp_card_bm->at(i);
  1594       actual = _card_bm->at(i);
  1596       if (expected && !actual) {
  1597         if (_verbose) {
  1598           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
  1599                                  "expected: %s, actual: %s",
  1600                                  hr->hrs_index(), i,
  1601                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
  1603         failures += 1;
  1607     if (failures > 0 && _verbose)  {
  1608       gclog_or_tty->print_cr("Region " HR_FORMAT ", ntams: " PTR_FORMAT ", "
  1609                              "marked_bytes: calc/actual " SIZE_FORMAT "/" SIZE_FORMAT,
  1610                              HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start(),
  1611                              _calc_cl.region_marked_bytes(), hr->next_marked_bytes());
  1614     _failures += failures;
  1616     // We could stop iteration over the heap when we
  1617     // find the first violating region by returning true.
  1618     return false;
  1620 };
  1622 class G1ParVerifyFinalCountTask: public AbstractGangTask {
  1623 protected:
  1624   G1CollectedHeap* _g1h;
  1625   ConcurrentMark* _cm;
  1626   BitMap* _actual_region_bm;
  1627   BitMap* _actual_card_bm;
  1629   uint    _n_workers;
  1631   BitMap* _expected_region_bm;
  1632   BitMap* _expected_card_bm;
  1634   int  _failures;
  1635   bool _verbose;
  1637 public:
  1638   G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
  1639                             BitMap* region_bm, BitMap* card_bm,
  1640                             BitMap* expected_region_bm, BitMap* expected_card_bm)
  1641     : AbstractGangTask("G1 verify final counting"),
  1642       _g1h(g1h), _cm(_g1h->concurrent_mark()),
  1643       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
  1644       _expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
  1645       _failures(0), _verbose(false),
  1646       _n_workers(0) {
  1647     assert(VerifyDuringGC, "don't call this otherwise");
  1649     // Use the value already set as the number of active threads
  1650     // in the call to run_task().
  1651     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1652       assert(_g1h->workers()->active_workers() > 0,
  1653              "Should have been previously set");
  1654       _n_workers = _g1h->workers()->active_workers();
  1655     } else {
  1656       _n_workers = 1;
  1659     assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
  1660     assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");
  1662     _verbose = _cm->verbose_medium();
  1665   void work(uint worker_id) {
  1666     assert(worker_id < _n_workers, "invariant");
  1668     VerifyLiveObjectDataHRClosure verify_cl(_g1h,
  1669                                             _actual_region_bm, _actual_card_bm,
  1670                                             _expected_region_bm,
  1671                                             _expected_card_bm,
  1672                                             _verbose);
  1674     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1675       _g1h->heap_region_par_iterate_chunked(&verify_cl,
  1676                                             worker_id,
  1677                                             _n_workers,
  1678                                             HeapRegion::VerifyCountClaimValue);
  1679     } else {
  1680       _g1h->heap_region_iterate(&verify_cl);
  1683     Atomic::add(verify_cl.failures(), &_failures);
  1686   int failures() const { return _failures; }
  1687 };
  1689 // Closure that finalizes the liveness counting data.
  1690 // Used during the cleanup pause.
  1691 // Sets the bits corresponding to the interval [NTAMS, top)
  1692 // (which contains the implicitly live objects) in the
  1693 // card liveness bitmap. Also sets the bit for each region
  1694 // containing live data in the region liveness bitmap.
  1696 class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
  1697  public:
  1698   FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
  1699                               BitMap* region_bm,
  1700                               BitMap* card_bm) :
  1701     CMCountDataClosureBase(g1h, region_bm, card_bm) { }
  1703   bool doHeapRegion(HeapRegion* hr) {
  1705     if (hr->continuesHumongous()) {
  1706       // We will ignore these here and process them when their
  1707       // associated "starts humongous" region is processed (see
  1708       // set_bit_for_region()). Note that we cannot rely on their
  1709       // associated "starts humongous" region to have its bit set to
  1710       // 1 since, due to the region chunking in the parallel region
  1711       // iteration, a "continues humongous" region might be visited
  1712       // before its associated "starts humongous".
  1713       return false;
  1716     HeapWord* ntams = hr->next_top_at_mark_start();
  1717     HeapWord* top   = hr->top();
  1719     assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
  1721     // Mark the allocated-since-marking portion...
  1722     if (ntams < top) {
  1723       // This definitely means the region has live objects.
  1724       set_bit_for_region(hr);
  1726       // Now set the bits in the card bitmap for [ntams, top)
  1727       BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
  1728       BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
  1730       // Note: if we're looking at the last region in the heap, top
  1731       // could actually be just beyond the end of the heap; end_idx
  1732       // will then correspond to a (non-existent) card that is also
  1733       // just beyond the heap.
  1734       if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
  1735         // end of object is not card aligned - increment to cover
  1736         // all the cards spanned by the object
  1737         end_idx += 1;
  1740       assert(end_idx <= _card_bm->size(),
  1741              err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
  1742                      end_idx, _card_bm->size()));
  1743       assert(start_idx < _card_bm->size(),
  1744              err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
  1745                      start_idx, _card_bm->size()));
  1747       _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
  1750     // Set the bit for the region if it contains live data
  1751     if (hr->next_marked_bytes() > 0) {
  1752       set_bit_for_region(hr);
  1755     return false;
  1757 };
  1759 class G1ParFinalCountTask: public AbstractGangTask {
  1760 protected:
  1761   G1CollectedHeap* _g1h;
  1762   ConcurrentMark* _cm;
  1763   BitMap* _actual_region_bm;
  1764   BitMap* _actual_card_bm;
  1766   uint    _n_workers;
  1768 public:
  1769   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
  1770     : AbstractGangTask("G1 final counting"),
  1771       _g1h(g1h), _cm(_g1h->concurrent_mark()),
  1772       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
  1773       _n_workers(0) {
  1774     // Use the value already set as the number of active threads
  1775     // in the call to run_task().
  1776     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1777       assert(_g1h->workers()->active_workers() > 0,
  1778              "Should have been previously set");
  1779       _n_workers = _g1h->workers()->active_workers();
  1780     } else {
  1781       _n_workers = 1;
  1785   void work(uint worker_id) {
  1786     assert(worker_id < _n_workers, "invariant");
  1788     FinalCountDataUpdateClosure final_update_cl(_g1h,
  1789                                                 _actual_region_bm,
  1790                                                 _actual_card_bm);
  1792     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1793       _g1h->heap_region_par_iterate_chunked(&final_update_cl,
  1794                                             worker_id,
  1795                                             _n_workers,
  1796                                             HeapRegion::FinalCountClaimValue);
  1797     } else {
  1798       _g1h->heap_region_iterate(&final_update_cl);
  1801 };
  1803 class G1ParNoteEndTask;
  1805 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
  1806   G1CollectedHeap* _g1;
  1807   int _worker_num;
  1808   size_t _max_live_bytes;
  1809   uint _regions_claimed;
  1810   size_t _freed_bytes;
  1811   FreeRegionList* _local_cleanup_list;
  1812   OldRegionSet* _old_proxy_set;
  1813   HumongousRegionSet* _humongous_proxy_set;
  1814   HRRSCleanupTask* _hrrs_cleanup_task;
  1815   double _claimed_region_time;
  1816   double _max_region_time;
  1818 public:
  1819   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
  1820                              int worker_num,
  1821                              FreeRegionList* local_cleanup_list,
  1822                              OldRegionSet* old_proxy_set,
  1823                              HumongousRegionSet* humongous_proxy_set,
  1824                              HRRSCleanupTask* hrrs_cleanup_task) :
  1825     _g1(g1), _worker_num(worker_num),
  1826     _max_live_bytes(0), _regions_claimed(0),
  1827     _freed_bytes(0),
  1828     _claimed_region_time(0.0), _max_region_time(0.0),
  1829     _local_cleanup_list(local_cleanup_list),
  1830     _old_proxy_set(old_proxy_set),
  1831     _humongous_proxy_set(humongous_proxy_set),
  1832     _hrrs_cleanup_task(hrrs_cleanup_task) { }
  1834   size_t freed_bytes() { return _freed_bytes; }
  1836   bool doHeapRegion(HeapRegion *hr) {
  1837     if (hr->continuesHumongous()) {
  1838       return false;
  1840     // We use a claim value of zero here because all regions
  1841     // were claimed with value 1 in the FinalCount task.
  1842     _g1->reset_gc_time_stamps(hr);
  1843     double start = os::elapsedTime();
  1844     _regions_claimed++;
  1845     hr->note_end_of_marking();
  1846     _max_live_bytes += hr->max_live_bytes();
  1847     _g1->free_region_if_empty(hr,
  1848                               &_freed_bytes,
  1849                               _local_cleanup_list,
  1850                               _old_proxy_set,
  1851                               _humongous_proxy_set,
  1852                               _hrrs_cleanup_task,
  1853                               true /* par */);
  1854     double region_time = (os::elapsedTime() - start);
  1855     _claimed_region_time += region_time;
  1856     if (region_time > _max_region_time) {
  1857       _max_region_time = region_time;
  1859     return false;
  1862   size_t max_live_bytes() { return _max_live_bytes; }
  1863   uint regions_claimed() { return _regions_claimed; }
  1864   double claimed_region_time_sec() { return _claimed_region_time; }
  1865   double max_region_time_sec() { return _max_region_time; }
  1866 };
  1868 class G1ParNoteEndTask: public AbstractGangTask {
  1869   friend class G1NoteEndOfConcMarkClosure;
  1871 protected:
  1872   G1CollectedHeap* _g1h;
  1873   size_t _max_live_bytes;
  1874   size_t _freed_bytes;
  1875   FreeRegionList* _cleanup_list;
  1877 public:
  1878   G1ParNoteEndTask(G1CollectedHeap* g1h,
  1879                    FreeRegionList* cleanup_list) :
  1880     AbstractGangTask("G1 note end"), _g1h(g1h),
  1881     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
  1883   void work(uint worker_id) {
  1884     double start = os::elapsedTime();
  1885     FreeRegionList local_cleanup_list("Local Cleanup List");
  1886     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
  1887     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
  1888     HRRSCleanupTask hrrs_cleanup_task;
  1889     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
  1890                                            &old_proxy_set,
  1891                                            &humongous_proxy_set,
  1892                                            &hrrs_cleanup_task);
  1893     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1894       _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
  1895                                             _g1h->workers()->active_workers(),
  1896                                             HeapRegion::NoteEndClaimValue);
  1897     } else {
  1898       _g1h->heap_region_iterate(&g1_note_end);
  1900     assert(g1_note_end.complete(), "Shouldn't have yielded!");
  1902     // Now update the lists
  1903     _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
  1904                                             NULL /* free_list */,
  1905                                             &old_proxy_set,
  1906                                             &humongous_proxy_set,
  1907                                             true /* par */);
  1909       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  1910       _max_live_bytes += g1_note_end.max_live_bytes();
  1911       _freed_bytes += g1_note_end.freed_bytes();
  1913       // If we iterate over the global cleanup list at the end of
  1914       // cleanup to do this printing we cannot guarantee that we only
  1915       // generate output for the newly-reclaimed regions (the list
  1916       // might not be empty at the beginning of cleanup; we might
  1917       // still be working on its previous contents). So we do the
  1918       // printing here, before we append the new regions to the global
  1919       // cleanup list.
  1921       G1HRPrinter* hr_printer = _g1h->hr_printer();
  1922       if (hr_printer->is_active()) {
  1923         HeapRegionLinkedListIterator iter(&local_cleanup_list);
  1924         while (iter.more_available()) {
  1925           HeapRegion* hr = iter.get_next();
  1926           hr_printer->cleanup(hr);
  1930       _cleanup_list->add_as_tail(&local_cleanup_list);
  1931       assert(local_cleanup_list.is_empty(), "post-condition");
  1933       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
  1936   size_t max_live_bytes() { return _max_live_bytes; }
  1937   size_t freed_bytes() { return _freed_bytes; }
  1938 };
  1940 class G1ParScrubRemSetTask: public AbstractGangTask {
  1941 protected:
  1942   G1RemSet* _g1rs;
  1943   BitMap* _region_bm;
  1944   BitMap* _card_bm;
  1945 public:
  1946   G1ParScrubRemSetTask(G1CollectedHeap* g1h,
  1947                        BitMap* region_bm, BitMap* card_bm) :
  1948     AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
  1949     _region_bm(region_bm), _card_bm(card_bm) { }
  1951   void work(uint worker_id) {
  1952     if (G1CollectedHeap::use_parallel_gc_threads()) {
  1953       _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
  1954                        HeapRegion::ScrubRemSetClaimValue);
  1955     } else {
  1956       _g1rs->scrub(_region_bm, _card_bm);
  1960 };
  1962 void ConcurrentMark::cleanup() {
  1963   // world is stopped at this checkpoint
  1964   assert(SafepointSynchronize::is_at_safepoint(),
  1965          "world should be stopped");
  1966   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1968   // If a full collection has happened, we shouldn't do this.
  1969   if (has_aborted()) {
  1970     g1h->set_marking_complete(); // So bitmap clearing isn't confused
  1971     return;
  1974   HRSPhaseSetter x(HRSPhaseCleanup);
  1975   g1h->verify_region_sets_optional();
  1977   if (VerifyDuringGC) {
  1978     HandleMark hm;  // handle scope
  1979     Universe::heap()->prepare_for_verify();
  1980     Universe::verify(VerifyOption_G1UsePrevMarking,
  1981                      " VerifyDuringGC:(before)");
  1984   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
  1985   g1p->record_concurrent_mark_cleanup_start();
  1987   double start = os::elapsedTime();
  1989   HeapRegionRemSet::reset_for_cleanup_tasks();
  1991   uint n_workers;
  1993   // Do counting once more with the world stopped for good measure.
  1994   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
  1996   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1997    assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  1998            "sanity check");
  2000     g1h->set_par_threads();
  2001     n_workers = g1h->n_par_threads();
  2002     assert(g1h->n_par_threads() == n_workers,
  2003            "Should not have been reset");
  2004     g1h->workers()->run_task(&g1_par_count_task);
  2005     // Done with the parallel phase so reset to 0.
  2006     g1h->set_par_threads(0);
  2008     assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
  2009            "sanity check");
  2010   } else {
  2011     n_workers = 1;
  2012     g1_par_count_task.work(0);
  2015   if (VerifyDuringGC) {
  2016     // Verify that the counting data accumulated during marking matches
  2017     // that calculated by walking the marking bitmap.
  2019     // Bitmaps to hold expected values
  2020     BitMap expected_region_bm(_region_bm.size(), false);
  2021     BitMap expected_card_bm(_card_bm.size(), false);
  2023     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
  2024                                                  &_region_bm,
  2025                                                  &_card_bm,
  2026                                                  &expected_region_bm,
  2027                                                  &expected_card_bm);
  2029     if (G1CollectedHeap::use_parallel_gc_threads()) {
  2030       g1h->set_par_threads((int)n_workers);
  2031       g1h->workers()->run_task(&g1_par_verify_task);
  2032       // Done with the parallel phase so reset to 0.
  2033       g1h->set_par_threads(0);
  2035       assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
  2036              "sanity check");
  2037     } else {
  2038       g1_par_verify_task.work(0);
  2041     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
  2044   size_t start_used_bytes = g1h->used();
  2045   g1h->set_marking_complete();
  2047   double count_end = os::elapsedTime();
  2048   double this_final_counting_time = (count_end - start);
  2049   _total_counting_time += this_final_counting_time;
  2051   if (G1PrintRegionLivenessInfo) {
  2052     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
  2053     _g1h->heap_region_iterate(&cl);
  2056   // Install newly created mark bitmap as "prev".
  2057   swapMarkBitMaps();
  2059   g1h->reset_gc_time_stamp();
  2061   // Note end of marking in all heap regions.
  2062   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
  2063   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2064     g1h->set_par_threads((int)n_workers);
  2065     g1h->workers()->run_task(&g1_par_note_end_task);
  2066     g1h->set_par_threads(0);
  2068     assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
  2069            "sanity check");
  2070   } else {
  2071     g1_par_note_end_task.work(0);
  2073   g1h->check_gc_time_stamps();
  2075   if (!cleanup_list_is_empty()) {
  2076     // The cleanup list is not empty, so we'll have to process it
  2077     // concurrently. Notify anyone else that might be wanting free
  2078     // regions that there will be more free regions coming soon.
  2079     g1h->set_free_regions_coming();
  2082   // Scrub the remembered sets before the cleanup-end call below, since
  2083   // scrubbing affects the metric by which we sort the heap regions.
  2084   if (G1ScrubRemSets) {
  2085     double rs_scrub_start = os::elapsedTime();
  2086     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
  2087     if (G1CollectedHeap::use_parallel_gc_threads()) {
  2088       g1h->set_par_threads((int)n_workers);
  2089       g1h->workers()->run_task(&g1_par_scrub_rs_task);
  2090       g1h->set_par_threads(0);
  2092       assert(g1h->check_heap_region_claim_values(
  2093                                             HeapRegion::ScrubRemSetClaimValue),
  2094              "sanity check");
  2095     } else {
  2096       g1_par_scrub_rs_task.work(0);
  2099     double rs_scrub_end = os::elapsedTime();
  2100     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
  2101     _total_rs_scrub_time += this_rs_scrub_time;
  2104   // Record the end of the cleanup pause; this will also free any regions
  2105   // totally full of garbage objects, and sort the regions.
  2106   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
  2108   // Statistics.
  2109   double end = os::elapsedTime();
  2110   _cleanup_times.add((end - start) * 1000.0);
  2112   if (G1Log::fine()) {
  2113     g1h->print_size_transition(gclog_or_tty,
  2114                                start_used_bytes,
  2115                                g1h->used(),
  2116                                g1h->capacity());
  2119   // Clean up will have freed any regions completely full of garbage.
  2120   // Update the soft reference policy with the new heap occupancy.
  2121   Universe::update_heap_info_at_gc();
  2123   // We need to make this a "collection" so any collection pause that
  2124   // races with it goes around and waits for completeCleanup to finish.
  2125   g1h->increment_total_collections();
  2127   // We reclaimed old regions so we should calculate the sizes to make
  2128   // sure we update the old gen/space data.
  2129   g1h->g1mm()->update_sizes();
  2131   if (VerifyDuringGC) {
  2132     HandleMark hm;  // handle scope
  2133     Universe::heap()->prepare_for_verify();
  2134     Universe::verify(VerifyOption_G1UsePrevMarking,
  2135                      " VerifyDuringGC:(after)");
  2138   g1h->verify_region_sets_optional();
  2139   g1h->trace_heap_after_concurrent_cycle();
  2142 void ConcurrentMark::completeCleanup() {
  2143   if (has_aborted()) return;
  2145   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  2147   _cleanup_list.verify_optional();
  2148   FreeRegionList tmp_free_list("Tmp Free List");
  2150   if (G1ConcRegionFreeingVerbose) {
  2151     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
  2152                            "cleanup list has %u entries",
  2153                            _cleanup_list.length());
  2156   // No one else should be accessing the _cleanup_list at this point,
  2157   // so it's not necessary to take any locks.
  2158   while (!_cleanup_list.is_empty()) {
  2159     HeapRegion* hr = _cleanup_list.remove_head();
  2160     assert(hr != NULL, "the list was not empty");
  2161     hr->par_clear();
  2162     tmp_free_list.add_as_tail(hr);
  2164     // Instead of adding one region at a time to the secondary_free_list,
  2165     // we accumulate them in the local list and move them a few at a
  2166     // time. This also cuts down on the number of notify_all() calls
  2167     // we do during this process. We'll also append the local list when
  2168     // _cleanup_list is empty (which means we just removed the last
  2169     // region from the _cleanup_list).
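           // For instance, with a (hypothetical) G1SecondaryFreeListAppendLength
           // of 4, the handover below runs on every 4th freed region, taking the
           // SecondaryFreeList_lock and calling notify_all() about 1/4 as often
           // as a region-at-a-time handover would.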
  2170     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
  2171         _cleanup_list.is_empty()) {
  2172       if (G1ConcRegionFreeingVerbose) {
  2173         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
  2174                                "appending %u entries to the secondary_free_list, "
  2175                                "cleanup list still has %u entries",
  2176                                tmp_free_list.length(),
  2177                                _cleanup_list.length());
  2181         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  2182         g1h->secondary_free_list_add_as_tail(&tmp_free_list);
  2183         SecondaryFreeList_lock->notify_all();
  2186       if (G1StressConcRegionFreeing) {
  2187         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
  2188           os::sleep(Thread::current(), (jlong) 1, false);
  2193   assert(tmp_free_list.is_empty(), "post-condition");
  2196 // Supporting Object and Oop closures for reference discovery
  2197 // and processing during marking.
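       // A non-NULL object is judged alive below if it lies outside the G1
       // reserved heap (and is therefore not covered by the marking
       // information) or if marking has not established that it is dead.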
  2199 bool G1CMIsAliveClosure::do_object_b(oop obj) {
  2200   HeapWord* addr = (HeapWord*)obj;
  2201   return addr != NULL &&
  2202          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
  2205 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
  2206 // Uses the CMTask associated with a worker thread (for serial reference
  2207 // processing the CMTask for worker 0 is used) to preserve (mark) and
  2208 // trace referent objects.
  2209 //
  2210 // Using the CMTask and embedded local queues avoids having the worker
  2211 // threads operating on the global mark stack. This reduces the risk
  2212 // of overflowing the stack - which we would rather avoid at this late
  2213 // stage. Using the tasks' local queues also removes the potential
  2214 // for the workers to interfere with each other, which could occur if
  2215 // they operated on the global stack.
  2217 class G1CMKeepAliveAndDrainClosure: public OopClosure {
  2218   ConcurrentMark* _cm;
  2219   CMTask*         _task;
  2220   int             _ref_counter_limit;
  2221   int             _ref_counter;
  2222   bool            _is_serial;
  2223  public:
  2224   G1CMKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
  2225     _cm(cm), _task(task), _is_serial(is_serial),
  2226     _ref_counter_limit(G1RefProcDrainInterval) {
  2227     assert(_ref_counter_limit > 0, "sanity");
  2228     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  2229     _ref_counter = _ref_counter_limit;
  2232   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  2233   virtual void do_oop(      oop* p) { do_oop_work(p); }
  2235   template <class T> void do_oop_work(T* p) {
  2236     if (!_cm->has_overflown()) {
  2237       oop obj = oopDesc::load_decode_heap_oop(p);
  2238       if (_cm->verbose_high()) {
  2239         gclog_or_tty->print_cr("\t[%u] we're looking at location "
  2240                                "*"PTR_FORMAT" = "PTR_FORMAT,
  2241                                _task->worker_id(), p, (void*) obj);
  2244       _task->deal_with_reference(obj);
  2245       _ref_counter--;
  2247       if (_ref_counter == 0) {
  2248         // We have dealt with _ref_counter_limit references, pushing them
  2249         // and objects reachable from them on to the local stack (and
  2250         // possibly the global stack). Call CMTask::do_marking_step() to
  2251         // process these entries.
  2252         //
  2253         // We call CMTask::do_marking_step() in a loop, which we'll exit if
  2254         // there's nothing more to do (i.e. we're done with the entries that
  2255         // were pushed as a result of the CMTask::deal_with_reference() calls
  2256         // above) or we overflow.
  2257         //
  2258         // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
  2259         // flag while there may still be some work to do. (See the comment at
  2260         // the beginning of CMTask::do_marking_step() for those conditions -
  2261         // one of which is reaching the specified time target.) It is only
  2262         // when CMTask::do_marking_step() returns without setting the
  2263         // has_aborted() flag that the marking step has completed.
  2264         do {
  2265           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
  2266           _task->do_marking_step(mark_step_duration_ms,
  2267                                  false      /* do_termination */,
  2268                                  _is_serial);
  2269         } while (_task->has_aborted() && !_cm->has_overflown());
  2270         _ref_counter = _ref_counter_limit;
  2272     } else {
  2273       if (_cm->verbose_high()) {
  2274          gclog_or_tty->print_cr("\t[%u] CM Overflow", _task->worker_id());
  2278 };
  2280 // 'Drain' oop closure used by both serial and parallel reference processing.
  2281 // Uses the CMTask associated with a given worker thread (for serial
  2282 // reference processing the CMTask for worker 0 is used). Calls the
  2283 // do_marking_step routine, with an unbelievably large timeout value,
  2284 // to drain the marking data structures of the remaining entries
  2285 // added by the 'keep alive' oop closure above.
  2287 class G1CMDrainMarkingStackClosure: public VoidClosure {
  2288   ConcurrentMark* _cm;
  2289   CMTask*         _task;
  2290   bool            _is_serial;
  2291  public:
  2292   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_serial) :
  2293     _cm(cm), _task(task), _is_serial(is_serial) {
  2294     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  2297   void do_void() {
  2298     do {
  2299       if (_cm->verbose_high()) {
  2300         gclog_or_tty->print_cr("\t[%u] Drain: Calling do_marking_step - serial: %s",
  2301                                _task->worker_id(), BOOL_TO_STR(_is_serial));
  2304       // We call CMTask::do_marking_step() to completely drain the local
  2305       // and global marking stacks of entries pushed by the 'keep alive'
  2306       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
  2307       //
  2308       // CMTask::do_marking_step() is called in a loop, which we'll exit
  2309       // if there's nothing more to do (i.e. we've completely drained the
  2310       // entries that were pushed as a result of applying the 'keep alive'
  2311       // closure to the entries on the discovered ref lists) or we overflow
  2312       // the global marking stack.
  2313       //
  2314       // Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
  2315       // flag while there may still be some work to do. (See the comment at
  2316       // the beginning of CMTask::do_marking_step() for those conditions -
  2317       // one of which is reaching the specified time target.) It is only
  2318       // when CMTask::do_marking_step() returns without setting the
  2319       // has_aborted() flag that the marking step has completed.
  2321       _task->do_marking_step(1000000000.0 /* something very large */,
  2322                              true         /* do_termination */,
  2323                              _is_serial);
  2324     } while (_task->has_aborted() && !_cm->has_overflown());
  2326 };
  2328 // Implementation of AbstractRefProcTaskExecutor for parallel
  2329 // reference processing at the end of G1 concurrent marking
  2331 class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  2332 private:
  2333   G1CollectedHeap* _g1h;
  2334   ConcurrentMark*  _cm;
  2335   WorkGang*        _workers;
  2336   int              _active_workers;
  2338 public:
  2339   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
  2340                           ConcurrentMark* cm,
  2341                           WorkGang* workers,
  2342                           int n_workers) :
  2343     _g1h(g1h), _cm(cm),
  2344     _workers(workers), _active_workers(n_workers) { }
  2346   // Executes the given task using concurrent marking worker threads.
  2347   virtual void execute(ProcessTask& task);
  2348   virtual void execute(EnqueueTask& task);
  2349 };
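       // Sketch of intended use (mirroring weakRefsWork() below): an executor
       // is constructed over the G1 work gang and handed to
       // ReferenceProcessor::process_discovered_references(), which invokes
       // execute() for each phase it decides to run multi-threaded.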
  2351 class G1CMRefProcTaskProxy: public AbstractGangTask {
  2352   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  2353   ProcessTask&     _proc_task;
  2354   G1CollectedHeap* _g1h;
  2355   ConcurrentMark*  _cm;
  2357 public:
  2358   G1CMRefProcTaskProxy(ProcessTask& proc_task,
  2359                        G1CollectedHeap* g1h,
  2360                        ConcurrentMark* cm) :
  2361     AbstractGangTask("Process reference objects in parallel"),
  2362     _proc_task(proc_task), _g1h(g1h), _cm(cm) {
  2363     ReferenceProcessor* rp = _g1h->ref_processor_cm();
  2364     assert(rp->processing_is_mt(), "shouldn't be here otherwise");
  2367   virtual void work(uint worker_id) {
  2368     CMTask* task = _cm->task(worker_id);
  2369     G1CMIsAliveClosure g1_is_alive(_g1h);
  2370     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
  2371     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
  2373     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
  2375 };
  2377 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  2378   assert(_workers != NULL, "Need parallel worker threads.");
  2379   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  2381   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
  2383   // We need to reset the concurrency level before each
  2384   // proxy task execution, so that the termination protocol
  2385   // and overflow handling in CMTask::do_marking_step() know
  2386   // how many workers to wait for.
  2387   _cm->set_concurrency(_active_workers);
  2388   _g1h->set_par_threads(_active_workers);
  2389   _workers->run_task(&proc_task_proxy);
  2390   _g1h->set_par_threads(0);
  2393 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
  2394   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  2395   EnqueueTask& _enq_task;
  2397 public:
  2398   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
  2399     AbstractGangTask("Enqueue reference objects in parallel"),
  2400     _enq_task(enq_task) { }
  2402   virtual void work(uint worker_id) {
  2403     _enq_task.work(worker_id);
  2405 };
  2407 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  2408   assert(_workers != NULL, "Need parallel worker threads.");
  2409   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
  2411   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
  2413   // Not strictly necessary but...
  2414   //
  2415   // We need to reset the concurrency level before each
  2416   // proxy task execution, so that the termination protocol
  2417   // and overflow handling in CMTask::do_marking_step() know
  2418   // how many workers to wait for.
  2419   _cm->set_concurrency(_active_workers);
  2420   _g1h->set_par_threads(_active_workers);
  2421   _workers->run_task(&enq_task_proxy);
  2422   _g1h->set_par_threads(0);
  2425 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
  2426   if (has_overflown()) {
  2427     // Skip processing the discovered references if we have
  2428     // overflown the global marking stack. Reference objects
  2429     // only get discovered once so it is OK to not
  2430     // de-populate the discovered reference lists. We could have,
  2431     // but the only benefit would be that, when marking restarts,
  2432     // fewer reference objects are discovered.
  2433     return;
  2436   ResourceMark rm;
  2437   HandleMark   hm;
  2439   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  2441   // Is alive closure.
  2442   G1CMIsAliveClosure g1_is_alive(g1h);
  2444   // Inner scope to exclude the cleaning of the string and symbol
  2445   // tables from the displayed time.
  2447     if (G1Log::finer()) {
  2448       gclog_or_tty->put(' ');
  2450     GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
  2452     ReferenceProcessor* rp = g1h->ref_processor_cm();
  2454     // See the comment in G1CollectedHeap::ref_processing_init()
  2455     // about how reference processing currently works in G1.
  2457     // Set the soft reference policy
  2458     rp->setup_policy(clear_all_soft_refs);
  2459     assert(_markStack.isEmpty(), "mark stack should be empty");
  2461     // Instances of the 'Keep Alive' and 'Complete GC' closures used
  2462     // in serial reference processing. Note these closures are also
  2463     // used for serially processing (by the current thread) the
  2464     // JNI references during parallel reference processing.
  2465     //
  2466     // These closures do not need to synchronize with the worker
  2467     // threads involved in parallel reference processing as these
  2468     // instances are executed serially by the current thread (i.e.
  2469     // reference processing is not multi-threaded and is thus
  2470     // performed by the current thread instead of a gang worker).
  2471     //
  2472     // The gang tasks involved in parallel reference processing create
  2473     // their own instances of these closures, which do their own
  2474     // synchronization among themselves.
  2475     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
  2476     G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
  2478     // We need at least one active thread. If reference processing
  2479     // is not multi-threaded we use the current (VMThread) thread,
  2480     // otherwise we use the work gang from the G1CollectedHeap and
  2481     // we utilize all the worker threads we can.
  2482     bool processing_is_mt = rp->processing_is_mt() && g1h->workers() != NULL;
  2483     uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
  2484     active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
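           // That is, clamp the chosen worker count into the range
           // [1, _max_worker_id].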
  2486     // Parallel processing task executor.
  2487     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
  2488                                               g1h->workers(), active_workers);
  2489     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
  2491     // Set the concurrency level. The phase was already set prior to
  2492     // executing the remark task.
  2493     set_concurrency(active_workers);
  2495     // Set the degree of MT processing here.  If the discovery was done MT,
  2496     // the number of threads involved during discovery could differ from
  2497     // the number of active workers.  This is OK as long as the discovered
  2498     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
  2499     rp->set_active_mt_degree(active_workers);
  2501     // Process the weak references.
  2502     const ReferenceProcessorStats& stats =
  2503         rp->process_discovered_references(&g1_is_alive,
  2504                                           &g1_keep_alive,
  2505                                           &g1_drain_mark_stack,
  2506                                           executor,
  2507                                           g1h->gc_timer_cm());
  2508     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
  2510     // The do_oop work routines of the keep_alive and drain_marking_stack
  2511     // oop closures will set the has_overflown flag if we overflow the
  2512     // global marking stack.
  2514     assert(_markStack.overflow() || _markStack.isEmpty(),
  2515             "mark stack should be empty (unless it overflowed)");
  2517     if (_markStack.overflow()) {
  2518       // This should have been done already when we tried to push an
  2519       // entry on to the global mark stack. But let's do it again.
  2520       set_has_overflown();
  2523     assert(rp->num_q() == active_workers, "why not");
  2525     rp->enqueue_discovered_references(executor);
  2527     rp->verify_no_references_recorded();
  2528     assert(!rp->discovery_enabled(), "Post condition");
  2531   g1h->unlink_string_and_symbol_table(&g1_is_alive);
  2534 void ConcurrentMark::swapMarkBitMaps() {
  2535   CMBitMapRO* temp = _prevMarkBitMap;
  2536   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
  2537   _nextMarkBitMap  = (CMBitMap*)  temp;
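         // The bitmap that marking just completed with becomes the
         // authoritative "prev" bitmap; the old "prev" storage will be cleared
         // and reused as the "next" bitmap for the following marking cycle.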
  2540 class CMRemarkTask: public AbstractGangTask {
  2541 private:
  2542   ConcurrentMark* _cm;
  2543   bool            _is_serial;
  2544 public:
  2545   void work(uint worker_id) {
  2546     // Since all available tasks are actually started, we should
  2547     // only proceed if we're supposed to be active.
  2548     if (worker_id < _cm->active_tasks()) {
  2549       CMTask* task = _cm->task(worker_id);
  2550       task->record_start_time();
  2551       do {
  2552         task->do_marking_step(1000000000.0 /* something very large */,
  2553                               true         /* do_termination       */,
  2554                               _is_serial);
  2555       } while (task->has_aborted() && !_cm->has_overflown());
  2556       // If we overflow, then we do not want to restart. We instead
  2557       // want to abort remark and do concurrent marking again.
  2558       task->record_end_time();
  2562   CMRemarkTask(ConcurrentMark* cm, int active_workers, bool is_serial) :
  2563     AbstractGangTask("Par Remark"), _cm(cm), _is_serial(is_serial) {
  2564     _cm->terminator()->reset_for_reuse(active_workers);
  2566 };
  2568 void ConcurrentMark::checkpointRootsFinalWork() {
  2569   ResourceMark rm;
  2570   HandleMark   hm;
  2571   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  2573   g1h->ensure_parsability(false);
  2575   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2576     G1CollectedHeap::StrongRootsScope srs(g1h);
  2577     // this is remark, so we'll use up all active threads
  2578     uint active_workers = g1h->workers()->active_workers();
  2579     if (active_workers == 0) {
  2580       assert(active_workers > 0, "Should have been set earlier");
  2581       active_workers = (uint) ParallelGCThreads;
  2582       g1h->workers()->set_active_workers(active_workers);
  2584     set_concurrency_and_phase(active_workers, false /* concurrent */);
  2585     // Leave _parallel_marking_threads at its
  2586     // value originally calculated in the ConcurrentMark
  2587     // constructor and pass values of the active workers
  2588     // through the gang in the task.
  2590     CMRemarkTask remarkTask(this, active_workers, false /* is_serial */);
  2591     // We will start all available threads, even if we decide that the
  2592     // active_workers will be fewer. The extra ones will just bail out
  2593     // immediately.
  2594     g1h->set_par_threads(active_workers);
  2595     g1h->workers()->run_task(&remarkTask);
  2596     g1h->set_par_threads(0);
  2597   } else {
  2598     G1CollectedHeap::StrongRootsScope srs(g1h);
  2599     uint active_workers = 1;
  2600     set_concurrency_and_phase(active_workers, false /* concurrent */);
  2602     // Note - if there's no work gang then the VMThread will be
  2603     // the thread to execute the remark - serially. We have
  2604     // to pass true for the is_serial parameter so that
  2605     // CMTask::do_marking_step() doesn't enter the sync
  2606     // barriers in the event of an overflow. Doing so will
  2607     // cause an assert that the current thread is not a
  2608     // concurrent GC thread.
  2609     CMRemarkTask remarkTask(this, active_workers, true /* is_serial*/);
  2610     remarkTask.work(0);
  2612   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  2613   guarantee(has_overflown() ||
  2614             satb_mq_set.completed_buffers_num() == 0,
  2615             err_msg("Invariant: has_overflown = %s, num buffers = %d",
  2616                     BOOL_TO_STR(has_overflown()),
  2617                     satb_mq_set.completed_buffers_num()));
  2619   print_stats();
  2622 #ifndef PRODUCT
  2624 class PrintReachableOopClosure: public OopClosure {
  2625 private:
  2626   G1CollectedHeap* _g1h;
  2627   outputStream*    _out;
  2628   VerifyOption     _vo;
  2629   bool             _all;
  2631 public:
  2632   PrintReachableOopClosure(outputStream* out,
  2633                            VerifyOption  vo,
  2634                            bool          all) :
  2635     _g1h(G1CollectedHeap::heap()),
  2636     _out(out), _vo(vo), _all(all) { }
  2638   void do_oop(narrowOop* p) { do_oop_work(p); }
  2639   void do_oop(      oop* p) { do_oop_work(p); }
  2641   template <class T> void do_oop_work(T* p) {
  2642     oop         obj = oopDesc::load_decode_heap_oop(p);
  2643     const char* str = NULL;
  2644     const char* str2 = "";
  2646     if (obj == NULL) {
  2647       str = "";
  2648     } else if (!_g1h->is_in_g1_reserved(obj)) {
  2649       str = " O";
  2650     } else {
  2651       HeapRegion* hr  = _g1h->heap_region_containing(obj);
  2652       guarantee(hr != NULL, "invariant");
  2653       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
  2654       bool marked = _g1h->is_marked(obj, _vo);
  2656       if (over_tams) {
  2657         str = " >";
  2658         if (marked) {
  2659           str2 = " AND MARKED";
  2661       } else if (marked) {
  2662         str = " M";
  2663       } else {
  2664         str = " NOT";
  2668     _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
  2669                    p, (void*) obj, str, str2);
  2671 };
  2673 class PrintReachableObjectClosure : public ObjectClosure {
  2674 private:
  2675   G1CollectedHeap* _g1h;
  2676   outputStream*    _out;
  2677   VerifyOption     _vo;
  2678   bool             _all;
  2679   HeapRegion*      _hr;
  2681 public:
  2682   PrintReachableObjectClosure(outputStream* out,
  2683                               VerifyOption  vo,
  2684                               bool          all,
  2685                               HeapRegion*   hr) :
  2686     _g1h(G1CollectedHeap::heap()),
  2687     _out(out), _vo(vo), _all(all), _hr(hr) { }
  2689   void do_object(oop o) {
  2690     bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
  2691     bool marked = _g1h->is_marked(o, _vo);
  2692     bool print_it = _all || over_tams || marked;
  2694     if (print_it) {
  2695       _out->print_cr(" "PTR_FORMAT"%s",
  2696                      (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
  2697       PrintReachableOopClosure oopCl(_out, _vo, _all);
  2698       o->oop_iterate_no_header(&oopCl);
  2701 };
  2703 class PrintReachableRegionClosure : public HeapRegionClosure {
  2704 private:
  2705   G1CollectedHeap* _g1h;
  2706   outputStream*    _out;
  2707   VerifyOption     _vo;
  2708   bool             _all;
  2710 public:
  2711   bool doHeapRegion(HeapRegion* hr) {
  2712     HeapWord* b = hr->bottom();
  2713     HeapWord* e = hr->end();
  2714     HeapWord* t = hr->top();
  2715     HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
  2716     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
  2717                    "TAMS: "PTR_FORMAT, b, e, t, p);
  2718     _out->cr();
  2720     HeapWord* from = b;
  2721     HeapWord* to   = t;
  2723     if (to > from) {
  2724       _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
  2725       _out->cr();
  2726       PrintReachableObjectClosure ocl(_out, _vo, _all, hr);
  2727       hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
  2728       _out->cr();
  2731     return false;
  2734   PrintReachableRegionClosure(outputStream* out,
  2735                               VerifyOption  vo,
  2736                               bool          all) :
  2737     _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
  2738 };
  2740 void ConcurrentMark::print_reachable(const char* str,
  2741                                      VerifyOption vo,
  2742                                      bool all) {
  2743   gclog_or_tty->cr();
  2744   gclog_or_tty->print_cr("== Doing heap dump... ");
  2746   if (G1PrintReachableBaseFile == NULL) {
  2747     gclog_or_tty->print_cr("  #### error: no base file defined");
  2748     return;
  2751   if (strlen(G1PrintReachableBaseFile) + 1 + strlen(str) >
  2752       (JVM_MAXPATHLEN - 1)) {
  2753     gclog_or_tty->print_cr("  #### error: file name too long");
  2754     return;
  2757   char file_name[JVM_MAXPATHLEN];
  2758   sprintf(file_name, "%s.%s", G1PrintReachableBaseFile, str);
  2759   gclog_or_tty->print_cr("  dumping to file %s", file_name);
  2761   fileStream fout(file_name);
  2762   if (!fout.is_open()) {
  2763     gclog_or_tty->print_cr("  #### error: could not open file");
  2764     return;
  2767   outputStream* out = &fout;
  2768   out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
  2769   out->cr();
  2771   out->print_cr("--- ITERATING OVER REGIONS");
  2772   out->cr();
  2773   PrintReachableRegionClosure rcl(out, vo, all);
  2774   _g1h->heap_region_iterate(&rcl);
  2775   out->cr();
  2777   gclog_or_tty->print_cr("  done");
  2778   gclog_or_tty->flush();
  2781 #endif // PRODUCT
  2783 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
  2784   // Note we are overriding the read-only view of the prev map here, via
  2785   // the cast.
  2786   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
  2789 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
  2790   _nextMarkBitMap->clearRange(mr);
  2793 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
  2794   clearRangePrevBitmap(mr);
  2795   clearRangeNextBitmap(mr);
  2798 HeapRegion*
  2799 ConcurrentMark::claim_region(uint worker_id) {
  2800   // "checkpoint" the finger
  2801   HeapWord* finger = _finger;
  2803   // _heap_end will not change underneath our feet; it only changes at
  2804   // yield points.
  2805   while (finger < _heap_end) {
  2806     assert(_g1h->is_in_g1_reserved(finger), "invariant");
  2808     // Note on how this code handles humongous regions. In the
  2809     // normal case the finger will reach the start of a "starts
  2810     // humongous" (SH) region. Its end will either be the end of the
  2811     // last "continues humongous" (CH) region in the sequence, or the
  2812     // standard end of the SH region (if the SH is the only region in
  2813     // the sequence). That way claim_region() will skip over the CH
  2814     // regions. However, there is a subtle race between a CM thread
  2815     // executing this method and a mutator thread doing a humongous
  2816     // object allocation. The two are not mutually exclusive as the CM
  2817     // thread does not need to hold the Heap_lock when it gets
  2818     // here. So there is a chance that claim_region() will come across
  2819     // a free region that's in the process of becoming a SH or a CH
  2820     // region. In the former case, it will either
  2821     //   a) Miss the update to the region's end, in which case it will
  2822     //      visit every subsequent CH region, will find their bitmaps
  2823     //      empty, and do nothing, or
  2824     //   b) Observe the update of the region's end (in which case
  2825     //      it will skip the subsequent CH regions).
  2826     // If it comes across a region that suddenly becomes CH, the
  2827     // scenario will be similar to b). So, the race between
  2828     // claim_region() and a humongous object allocation might force us
  2829     // to do a bit of unnecessary work (due to some unnecessary bitmap
  2830     // iterations) but it should not introduce any correctness issues.
  2831     HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
  2832     HeapWord*   bottom        = curr_region->bottom();
  2833     HeapWord*   end           = curr_region->end();
  2834     HeapWord*   limit         = curr_region->next_top_at_mark_start();
  2836     if (verbose_low()) {
  2837       gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
  2838                              "["PTR_FORMAT", "PTR_FORMAT"), "
  2839                              "limit = "PTR_FORMAT,
  2840                              worker_id, curr_region, bottom, end, limit);
  2843     // Is the gap between reading the finger and doing the CAS too long?
  2844     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
  2845     if (res == finger) {
  2846       // we succeeded
  2848       // notice that _finger == end cannot be guaranteed here since
  2849       // someone else might have moved the finger even further
  2850       assert(_finger >= end, "the finger should have moved forward");
  2852       if (verbose_low()) {
  2853         gclog_or_tty->print_cr("[%u] we were successful with region = "
  2854                                PTR_FORMAT, worker_id, curr_region);
  2857       if (limit > bottom) {
  2858         if (verbose_low()) {
  2859           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is not empty, "
  2860                                  "returning it ", worker_id, curr_region);
  2862         return curr_region;
  2863       } else {
  2864         assert(limit == bottom,
  2865                "the region limit should be at bottom");
  2866         if (verbose_low()) {
  2867           gclog_or_tty->print_cr("[%u] region "PTR_FORMAT" is empty, "
  2868                                  "returning NULL", worker_id, curr_region);
  2870         // we return NULL and the caller should try calling
  2871         // claim_region() again.
  2872         return NULL;
  2874     } else {
  2875       assert(_finger > finger, "the finger should have moved forward");
  2876       if (verbose_low()) {
  2877         gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
  2878                                "global finger = "PTR_FORMAT", "
  2879                                "our finger = "PTR_FORMAT,
  2880                                worker_id, _finger, finger);
  2883       // read it again
  2884       finger = _finger;
  2888   return NULL;
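// The claiming protocol above boils down to advancing a shared finger with
// a CAS. Below is a minimal, self-contained sketch of that idea using
// std::atomic in place of Atomic::cmpxchg_ptr; the fixed region size and
// the claim_next() name are illustrative assumptions (the real code
// advances the finger to the claimed region's actual end, which differs
// for humongous regions).

#include <atomic>
#include <cstddef>

static std::atomic<char*> g_finger;   // global finger (starts at heap start)

// Returns the start of the claimed region, or NULL when the heap has been
// exhausted. On CAS failure the finger is simply re-read and we retry,
// exactly like the loop in claim_region().
static char* claim_next(char* heap_end, size_t region_bytes) {
  char* finger = g_finger.load();
  while (finger < heap_end) {
    char* next = finger + region_bytes;
    if (g_finger.compare_exchange_strong(finger, next)) {
      return finger;                  // we now own [finger, next)
    }
    // failure: compare_exchange_strong reloaded 'finger' for us
  }
  return NULL;
}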
  2891 #ifndef PRODUCT
  2892 enum VerifyNoCSetOopsPhase {
  2893   VerifyNoCSetOopsStack,
  2894   VerifyNoCSetOopsQueues,
  2895   VerifyNoCSetOopsSATBCompleted,
  2896   VerifyNoCSetOopsSATBThread
  2897 };
  2899 class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure  {
  2900 private:
  2901   G1CollectedHeap* _g1h;
  2902   VerifyNoCSetOopsPhase _phase;
  2903   int _info;
  2905   const char* phase_str() {
  2906     switch (_phase) {
  2907     case VerifyNoCSetOopsStack:         return "Stack";
  2908     case VerifyNoCSetOopsQueues:        return "Queue";
  2909     case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
  2910     case VerifyNoCSetOopsSATBThread:    return "Thread SATB Buffers";
  2911     default:                            ShouldNotReachHere();
  2913     return NULL;
  2916   void do_object_work(oop obj) {
  2917     guarantee(!_g1h->obj_in_cs(obj),
  2918               err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
  2919                       (void*) obj, phase_str(), _info));
  2922 public:
  2923   VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
  2925   void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
  2926     _phase = phase;
  2927     _info = info;
  2930   virtual void do_oop(oop* p) {
  2931     oop obj = oopDesc::load_decode_heap_oop(p);
  2932     do_object_work(obj);
  2935   virtual void do_oop(narrowOop* p) {
  2936     // We should not come across narrow oops while scanning marking
  2937     // stacks and SATB buffers.
  2938     ShouldNotReachHere();
  2941   virtual void do_object(oop obj) {
  2942     do_object_work(obj);
  2944 };
  2946 void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
  2947                                          bool verify_enqueued_buffers,
  2948                                          bool verify_thread_buffers,
  2949                                          bool verify_fingers) {
  2950   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  2951   if (!G1CollectedHeap::heap()->mark_in_progress()) {
  2952     return;
  2955   VerifyNoCSetOopsClosure cl;
  2957   if (verify_stacks) {
  2958     // Verify entries on the global mark stack
  2959     cl.set_phase(VerifyNoCSetOopsStack);
  2960     _markStack.oops_do(&cl);
  2962     // Verify entries on the task queues
  2963     for (uint i = 0; i < _max_worker_id; i += 1) {
  2964       cl.set_phase(VerifyNoCSetOopsQueues, i);
  2965       CMTaskQueue* queue = _task_queues->queue(i);
  2966       queue->oops_do(&cl);
  2970   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  2972   // Verify entries on the enqueued SATB buffers
  2973   if (verify_enqueued_buffers) {
  2974     cl.set_phase(VerifyNoCSetOopsSATBCompleted);
  2975     satb_qs.iterate_completed_buffers_read_only(&cl);
  2978   // Verify entries on the per-thread SATB buffers
  2979   if (verify_thread_buffers) {
  2980     cl.set_phase(VerifyNoCSetOopsSATBThread);
  2981     satb_qs.iterate_thread_buffers_read_only(&cl);
  2984   if (verify_fingers) {
  2985     // Verify the global finger
  2986     HeapWord* global_finger = finger();
  2987     if (global_finger != NULL && global_finger < _heap_end) {
  2988       // The global finger always points to a heap region boundary. We
  2989       // use heap_region_containing_raw() to get the containing region
  2990       // given that the global finger could be pointing to a free region
  2991       // which subsequently becomes a "continues humongous" region. If that
  2992       // happens, heap_region_containing() will return the bottom of the
  2993       // corresponding starts humongous region and the check below will
  2994       // not hold any more.
  2995       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
  2996       guarantee(global_finger == global_hr->bottom(),
  2997                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
  2998                         global_finger, HR_FORMAT_PARAMS(global_hr)));
  3001     // Verify the task fingers
  3002     assert(parallel_marking_threads() <= _max_worker_id, "sanity");
  3003     for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
  3004       CMTask* task = _tasks[i];
  3005       HeapWord* task_finger = task->finger();
  3006       if (task_finger != NULL && task_finger < _heap_end) {
  3007         // See above note on the global finger verification.
  3008         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
  3009         guarantee(task_finger == task_hr->bottom() ||
  3010                   !task_hr->in_collection_set(),
  3011                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
  3012                           task_finger, HR_FORMAT_PARAMS(task_hr)));
  3017 #endif // PRODUCT
  3019 // Aggregate the counting data that was constructed concurrently
  3020 // with marking.
  3021 class AggregateCountDataHRClosure: public HeapRegionClosure {
  3022   G1CollectedHeap* _g1h;
  3023   ConcurrentMark* _cm;
  3024   CardTableModRefBS* _ct_bs;
  3025   BitMap* _cm_card_bm;
  3026   uint _max_worker_id;
  3028  public:
  3029   AggregateCountDataHRClosure(G1CollectedHeap* g1h,
  3030                               BitMap* cm_card_bm,
  3031                               uint max_worker_id) :
  3032     _g1h(g1h), _cm(g1h->concurrent_mark()),
  3033     _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
  3034     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
  3036   bool doHeapRegion(HeapRegion* hr) {
  3037     if (hr->continuesHumongous()) {
  3038       // We will ignore these here and process them when their
  3039       // associated "starts humongous" region is processed.
  3040       // Note that we cannot rely on their associated
  3041       // "starts humongous" region to have its bit set to 1
  3042       // since, due to the region chunking in the parallel region
  3043       // iteration, a "continues humongous" region might be visited
  3044       // before its associated "starts humongous".
  3045       return false;
  3048     HeapWord* start = hr->bottom();
  3049     HeapWord* limit = hr->next_top_at_mark_start();
  3050     HeapWord* end = hr->end();
  3052     assert(start <= limit && limit <= hr->top() && hr->top() <= hr->end(),
  3053            err_msg("Preconditions not met - "
  3054                    "start: "PTR_FORMAT", limit: "PTR_FORMAT", "
  3055                    "top: "PTR_FORMAT", end: "PTR_FORMAT,
  3056                    start, limit, hr->top(), hr->end()));
  3058     assert(hr->next_marked_bytes() == 0, "Precondition");
  3060     if (start == limit) {
  3061       // NTAMS of this region has not been set so nothing to do.
  3062       return false;
  3065     // 'start' should be in the heap.
  3066     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
  3067     // 'end' *may* be just beyond the end of the heap (if hr is the last region)
  3068     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
  3070     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
  3071     BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
  3072     BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
  3074     // If ntams is not card aligned then we bump the card bitmap index
  3075     // for limit so that we get all the cards spanned by
  3076     // the object ending at ntams.
  3077     // Note: if this is the last region in the heap then ntams
  3078     // could actually be just beyond the end of the heap;
  3079     // limit_idx will then correspond to a (non-existent) card
  3080     // that is also outside the heap.
  3081     if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
  3082       limit_idx += 1;
  3085     assert(limit_idx <= end_idx, "or else use atomics");
  3087     // Aggregate the "stripe" in the count data associated with hr.
  3088     uint hrs_index = hr->hrs_index();
  3089     size_t marked_bytes = 0;
  3091     for (uint i = 0; i < _max_worker_id; i += 1) {
  3092       size_t* marked_bytes_array = _cm->count_marked_bytes_array_for(i);
  3093       BitMap* task_card_bm = _cm->count_card_bitmap_for(i);
  3095       // Fetch the marked_bytes in this region for task i and
  3096       // add it to the running total for this region.
  3097       marked_bytes += marked_bytes_array[hrs_index];
  3099       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
  3100       // into the global card bitmap.
  3101       BitMap::idx_t scan_idx = task_card_bm->get_next_one_offset(start_idx, limit_idx);
  3103       while (scan_idx < limit_idx) {
  3104         assert(task_card_bm->at(scan_idx) == true, "should be");
  3105         _cm_card_bm->set_bit(scan_idx);
  3106         assert(_cm_card_bm->at(scan_idx) == true, "should be");
  3108         // BitMap::get_next_one_offset() can handle the case when
  3109         // its left_offset parameter is greater than its right_offset
  3110         // parameter. It does, however, have an early exit if
  3111         // left_offset == right_offset. So let's limit the value
  3112         // passed in for left offset here.
  3113         BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
  3114         scan_idx = task_card_bm->get_next_one_offset(next_idx, limit_idx);
  3118     // Update the marked bytes for this region.
  3119     hr->add_to_marked_bytes(marked_bytes);
  3121     // Next heap region
  3122     return false;
  3124 };
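// The index arithmetic above is easy to get wrong at the edges, so here is
// a standalone sketch of it. It assumes HotSpot's default 512-byte cards
// (CardTableModRefBS::card_shift == 9); card_range() is a hypothetical
// helper showing the same "bump limit_idx on misalignment" rule.

#include <cstddef>
#include <cstdint>

static const int    kCardShift = 9;                  // 512-byte cards
static const size_t kCardSize  = (size_t)1 << kCardShift;

static size_t card_index_for(const char* heap_start, const char* addr) {
  return (size_t)(addr - heap_start) >> kCardShift;
}

// Half-open card range [start_idx, limit_idx) covering [start, limit):
// when limit is not card aligned, its card is still partially spanned by
// the last object, so the limit index is bumped to include it.
static void card_range(const char* heap_start,
                       const char* start, const char* limit,
                       size_t* start_idx, size_t* limit_idx) {
  *start_idx = card_index_for(heap_start, start);
  *limit_idx = card_index_for(heap_start, limit);
  if (((uintptr_t)limit & (kCardSize - 1)) != 0) {
    *limit_idx += 1;
  }
}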
  3126 class G1AggregateCountDataTask: public AbstractGangTask {
  3127 protected:
  3128   G1CollectedHeap* _g1h;
  3129   ConcurrentMark* _cm;
  3130   BitMap* _cm_card_bm;
  3131   uint _max_worker_id;
  3132   int _active_workers;
  3134 public:
  3135   G1AggregateCountDataTask(G1CollectedHeap* g1h,
  3136                            ConcurrentMark* cm,
  3137                            BitMap* cm_card_bm,
  3138                            uint max_worker_id,
  3139                            int n_workers) :
  3140     AbstractGangTask("Count Aggregation"),
  3141     _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
  3142     _max_worker_id(max_worker_id),
  3143     _active_workers(n_workers) { }
  3145   void work(uint worker_id) {
  3146     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
  3148     if (G1CollectedHeap::use_parallel_gc_threads()) {
  3149       _g1h->heap_region_par_iterate_chunked(&cl, worker_id,
  3150                                             _active_workers,
  3151                                             HeapRegion::AggregateCountClaimValue);
  3152     } else {
  3153       _g1h->heap_region_iterate(&cl);
  3156 };
  3159 void ConcurrentMark::aggregate_count_data() {
  3160   int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  3161                         _g1h->workers()->active_workers() :
  3162                         1);
  3164   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
  3165                                            _max_worker_id, n_workers);
  3167   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3168     assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  3169            "sanity check");
  3170     _g1h->set_par_threads(n_workers);
  3171     _g1h->workers()->run_task(&g1_par_agg_task);
  3172     _g1h->set_par_threads(0);
  3174     assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
  3175            "sanity check");
  3176     _g1h->reset_heap_region_claim_values();
  3177   } else {
  3178     g1_par_agg_task.work(0);
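// heap_region_par_iterate_chunked() relies on per-region claim values so
// that the workers dispatched above share the region space safely. A
// stripped-down sketch of that technique (the claim constants, Region
// type and par_iterate() are illustrative, and the real code also starts
// each worker at a different offset to reduce contention):

#include <atomic>
#include <cstddef>

struct Region { std::atomic<int> claim; };

static const int kInitialClaimValue        = 0;   // illustrative values
static const int kAggregateCountClaimValue = 1;

// Each worker walks the region table; only the winner of the CAS on a
// region's claim word processes it, so every region is visited exactly
// once per phase regardless of how many workers run this loop.
template <typename Closure>
static void par_iterate(Region* regions, size_t n, Closure cl) {
  for (size_t i = 0; i < n; ++i) {
    int expected = kInitialClaimValue;
    if (regions[i].claim.compare_exchange_strong(expected,
                                                 kAggregateCountClaimValue)) {
      cl(regions[i]);
    }
  }
}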
  3182 // Clear the per-worker arrays used to store the per-region counting data
  3183 void ConcurrentMark::clear_all_count_data() {
  3184   // Clear the global card bitmap - it will be filled during
  3185   // liveness count aggregation (during remark) and the
  3186   // final counting task.
  3187   _card_bm.clear();
  3189   // Clear the global region bitmap - it will be filled as part
  3190   // of the final counting task.
  3191   _region_bm.clear();
  3193   uint max_regions = _g1h->max_regions();
  3194   assert(_max_worker_id > 0, "uninitialized");
  3196   for (uint i = 0; i < _max_worker_id; i += 1) {
  3197     BitMap* task_card_bm = count_card_bitmap_for(i);
  3198     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
  3200     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
  3201     assert(marked_bytes_array != NULL, "uninitialized");
  3203     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
  3204     task_card_bm->clear();
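// For reference, the shape of the data being cleared here: every worker
// owns a marked-bytes array indexed by region plus a private card bitmap,
// and the aggregation closure above merges all of the stripes. A compact
// stand-in (CountData is a hypothetical type, sized for illustration):

#include <algorithm>
#include <cstddef>
#include <vector>

struct CountData {
  std::vector<std::vector<size_t> > marked_bytes;  // [worker][region]
  std::vector<std::vector<bool> >   card_bm;       // [worker][card]

  CountData(size_t workers, size_t regions, size_t cards)
    : marked_bytes(workers, std::vector<size_t>(regions, 0)),
      card_bm(workers, std::vector<bool>(cards, false)) { }

  // The analogue of clear_all_count_data(): wipe every worker's stripe.
  void clear_all() {
    for (size_t i = 0; i < marked_bytes.size(); ++i) {
      std::fill(marked_bytes[i].begin(), marked_bytes[i].end(), (size_t)0);
      std::fill(card_bm[i].begin(), card_bm[i].end(), false);
    }
  }
};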
  3208 void ConcurrentMark::print_stats() {
  3209   if (verbose_stats()) {
  3210     gclog_or_tty->print_cr("---------------------------------------------------------------------");
  3211     for (size_t i = 0; i < _active_tasks; ++i) {
  3212       _tasks[i]->print_stats();
  3213       gclog_or_tty->print_cr("---------------------------------------------------------------------");
  3218 // abandon current marking iteration due to a Full GC
  3219 void ConcurrentMark::abort() {
  3220   // Clear all marks to force marking thread to do nothing
  3221   _nextMarkBitMap->clearAll();
  3222   // Clear the liveness counting data
  3223   clear_all_count_data();
  3224   // Empty mark stack
  3225   reset_marking_state();
  3226   for (uint i = 0; i < _max_worker_id; ++i) {
  3227     _tasks[i]->clear_region_fields();
  3229   _has_aborted = true;
  3231   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  3232   satb_mq_set.abandon_partial_marking();
  3233   // This can be called either during or outside marking; we'll read
  3234   // the expected_active value from the SATB queue set.
  3235   satb_mq_set.set_active_all_threads(
  3236                                  false, /* new active value */
  3237                                  satb_mq_set.is_active() /* expected_active */);
  3239   _g1h->trace_heap_after_concurrent_cycle();
  3240   _g1h->register_concurrent_cycle_end();
  3243 static void print_ms_time_info(const char* prefix, const char* name,
  3244                                NumberSeq& ns) {
  3245   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
  3246                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  3247   if (ns.num() > 0) {
  3248     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
  3249                            prefix, ns.sd(), ns.maximum());
  3253 void ConcurrentMark::print_summary_info() {
  3254   gclog_or_tty->print_cr(" Concurrent marking:");
  3255   print_ms_time_info("  ", "init marks", _init_times);
  3256   print_ms_time_info("  ", "remarks", _remark_times);
  3258     print_ms_time_info("     ", "final marks", _remark_mark_times);
  3259     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
  3262   print_ms_time_info("  ", "cleanups", _cleanup_times);
  3263   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
  3264                          _total_counting_time,
  3265                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
  3266                           (double)_cleanup_times.num()
  3267                          : 0.0));
  3268   if (G1ScrubRemSets) {
  3269     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
  3270                            _total_rs_scrub_time,
  3271                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
  3272                             (double)_cleanup_times.num()
  3273                            : 0.0));
  3275   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
  3276                          (_init_times.sum() + _remark_times.sum() +
  3277                           _cleanup_times.sum())/1000.0);
  3278   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
  3279                 "(%8.2f s marking).",
  3280                 cmThread()->vtime_accum(),
  3281                 cmThread()->vtime_mark_accum());
  3284 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
  3285   if (use_parallel_marking_threads()) {
  3286     _parallel_workers->print_worker_threads_on(st);
  3290 void ConcurrentMark::print_on_error(outputStream* st) const {
  3291   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
  3292       _prevMarkBitMap, _nextMarkBitMap);
  3293   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
  3294   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
  3297 // We take a break if someone is trying to stop the world.
  3298 bool ConcurrentMark::do_yield_check(uint worker_id) {
  3299   if (should_yield()) {
  3300     if (worker_id == 0) {
  3301       _g1h->g1_policy()->record_concurrent_pause();
  3303     cmThread()->yield();
  3304     return true;
  3305   } else {
  3306     return false;
  3310 bool ConcurrentMark::should_yield() {
  3311   return cmThread()->should_yield();
  3314 bool ConcurrentMark::containing_card_is_marked(void* p) {
  3315   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
  3316   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
  3319 bool ConcurrentMark::containing_cards_are_marked(void* start,
  3320                                                  void* last) {
  3321   return containing_card_is_marked(start) &&
  3322          containing_card_is_marked(last);
  3325 #ifndef PRODUCT
  3326 // for debugging purposes
  3327 void ConcurrentMark::print_finger() {
  3328   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
  3329                          _heap_start, _heap_end, _finger);
  3330   for (uint i = 0; i < _max_worker_id; ++i) {
  3331     gclog_or_tty->print("   %u: "PTR_FORMAT, i, _tasks[i]->finger());
  3333   gclog_or_tty->print_cr("");
  3335 #endif
  3337 void CMTask::scan_object(oop obj) {
  3338   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
  3340   if (_cm->verbose_high()) {
  3341     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
  3342                            _worker_id, (void*) obj);
  3345   size_t obj_size = obj->size();
  3346   _words_scanned += obj_size;
  3348   obj->oop_iterate(_cm_oop_closure);
  3349   statsOnly( ++_objs_scanned );
  3350   check_limits();
  3353 // Closure for iteration over bitmaps
  3354 class CMBitMapClosure : public BitMapClosure {
  3355 private:
  3356   // the bitmap that is being iterated over
  3357   CMBitMap*                   _nextMarkBitMap;
  3358   ConcurrentMark*             _cm;
  3359   CMTask*                     _task;
  3361 public:
  3362   CMBitMapClosure(CMTask *task, ConcurrentMark* cm, CMBitMap* nextMarkBitMap) :
  3363     _task(task), _cm(cm), _nextMarkBitMap(nextMarkBitMap) { }
  3365   bool do_bit(size_t offset) {
  3366     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
  3367     assert(_nextMarkBitMap->isMarked(addr), "invariant");
  3368     assert(addr < _cm->finger(), "invariant");
  3370     statsOnly( _task->increase_objs_found_on_bitmap() );
  3371     assert(addr >= _task->finger(), "invariant");
  3373     // We move this task's local finger along.
  3374     _task->move_finger_to(addr);
  3376     _task->scan_object(oop(addr));
  3377     // we only partially drain the local queue and global stack
  3378     _task->drain_local_queue(true);
  3379     _task->drain_global_stack(true);
  3381     // if the has_aborted flag has been raised, we need to bail out of
  3382     // the iteration
  3383     return !_task->has_aborted();
  3385 };
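// do_bit() participates in an iterate-until-the-closure-says-stop
// protocol: returning false ends the bitmap walk early so the task can
// honor its abort flag. A generic sketch of that contract (iterate_marked
// is a hypothetical helper over a plain bit vector):

#include <cstddef>
#include <vector>

// Visits every set bit in [start, limit); stops as soon as the visitor
// returns false. Returns true iff the whole range was covered, so an
// aborted caller knows it must restart from its local finger.
template <typename Visitor>
static bool iterate_marked(const std::vector<bool>& bm,
                           size_t start, size_t limit, Visitor visit) {
  for (size_t i = start; i < limit; ++i) {
    if (bm[i] && !visit(i)) {
      return false;
    }
  }
  return true;
}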
  3387 // Closure for iterating over objects, currently only used for
  3388 // processing SATB buffers.
  3389 class CMObjectClosure : public ObjectClosure {
  3390 private:
  3391   CMTask* _task;
  3393 public:
  3394   void do_object(oop obj) {
  3395     _task->deal_with_reference(obj);
  3398   CMObjectClosure(CMTask* task) : _task(task) { }
  3399 };
  3401 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
  3402                                ConcurrentMark* cm,
  3403                                CMTask* task)
  3404   : _g1h(g1h), _cm(cm), _task(task) {
  3405   assert(_ref_processor == NULL, "should be initialized to NULL");
  3407   if (G1UseConcMarkReferenceProcessing) {
  3408     _ref_processor = g1h->ref_processor_cm();
  3409     assert(_ref_processor != NULL, "should not be NULL");
  3413 void CMTask::setup_for_region(HeapRegion* hr) {
  3414   // Separated the asserts so that we know which one fires.
  3415   assert(hr != NULL,
  3416         "claim_region() should have filtered out continues humongous regions");
  3417   assert(!hr->continuesHumongous(),
  3418         "claim_region() should have filtered out continues humongous regions");
  3420   if (_cm->verbose_low()) {
  3421     gclog_or_tty->print_cr("[%u] setting up for region "PTR_FORMAT,
  3422                            _worker_id, hr);
  3425   _curr_region  = hr;
  3426   _finger       = hr->bottom();
  3427   update_region_limit();
  3430 void CMTask::update_region_limit() {
  3431   HeapRegion* hr            = _curr_region;
  3432   HeapWord* bottom          = hr->bottom();
  3433   HeapWord* limit           = hr->next_top_at_mark_start();
  3435   if (limit == bottom) {
  3436     if (_cm->verbose_low()) {
  3437       gclog_or_tty->print_cr("[%u] found an empty region "
  3438                              "["PTR_FORMAT", "PTR_FORMAT")",
  3439                              _worker_id, bottom, limit);
  3441     // The region was collected underneath our feet.
  3442     // We set the finger to bottom to ensure that the bitmap
  3443     // iteration that will follow this will not do anything.
  3444     // (this is not a condition that holds when we set the region up,
  3445     // as the region is not supposed to be empty in the first place)
  3446     _finger = bottom;
  3447   } else if (limit >= _region_limit) {
  3448     assert(limit >= _finger, "peace of mind");
  3449   } else {
  3450     assert(limit < _region_limit, "only way to get here");
  3451     // This can happen under some pretty unusual circumstances.  An
  3452     // evacuation pause empties the region underneath our feet (NTAMS
  3453     // at bottom). We then do some allocation in the region (NTAMS
  3454     // stays at bottom), followed by the region being used as a GC
  3455     // alloc region (NTAMS will move to top() and the objects
  3456     // originally below it will be grayed). All objects now marked in
  3457     // the region are explicitly grayed, if below the global finger,
  3458     // and in fact we do not need to scan anything else. So, we simply
  3459     // set _finger to be limit to ensure that the bitmap iteration
  3460     // doesn't do anything.
  3461     _finger = limit;
  3464   _region_limit = limit;
  3467 void CMTask::giveup_current_region() {
  3468   assert(_curr_region != NULL, "invariant");
  3469   if (_cm->verbose_low()) {
  3470     gclog_or_tty->print_cr("[%u] giving up region "PTR_FORMAT,
  3471                            _worker_id, _curr_region);
  3473   clear_region_fields();
  3476 void CMTask::clear_region_fields() {
  3477   // Values for these three fields that indicate that we're not
  3478   // holding on to a region.
  3479   _curr_region   = NULL;
  3480   _finger        = NULL;
  3481   _region_limit  = NULL;
  3484 void CMTask::set_cm_oop_closure(G1CMOopClosure* cm_oop_closure) {
  3485   if (cm_oop_closure == NULL) {
  3486     assert(_cm_oop_closure != NULL, "invariant");
  3487   } else {
  3488     assert(_cm_oop_closure == NULL, "invariant");
  3490   _cm_oop_closure = cm_oop_closure;
  3493 void CMTask::reset(CMBitMap* nextMarkBitMap) {
  3494   guarantee(nextMarkBitMap != NULL, "invariant");
  3496   if (_cm->verbose_low()) {
  3497     gclog_or_tty->print_cr("[%u] resetting", _worker_id);
  3500   _nextMarkBitMap                = nextMarkBitMap;
  3501   clear_region_fields();
  3503   _calls                         = 0;
  3504   _elapsed_time_ms               = 0.0;
  3505   _termination_time_ms           = 0.0;
  3506   _termination_start_time_ms     = 0.0;
  3508 #if _MARKING_STATS_
  3509   _local_pushes                  = 0;
  3510   _local_pops                    = 0;
  3511   _local_max_size                = 0;
  3512   _objs_scanned                  = 0;
  3513   _global_pushes                 = 0;
  3514   _global_pops                   = 0;
  3515   _global_max_size               = 0;
  3516   _global_transfers_to           = 0;
  3517   _global_transfers_from         = 0;
  3518   _regions_claimed               = 0;
  3519   _objs_found_on_bitmap          = 0;
  3520   _satb_buffers_processed        = 0;
  3521   _steal_attempts                = 0;
  3522   _steals                        = 0;
  3523   _aborted                       = 0;
  3524   _aborted_overflow              = 0;
  3525   _aborted_cm_aborted            = 0;
  3526   _aborted_yield                 = 0;
  3527   _aborted_timed_out             = 0;
  3528   _aborted_satb                  = 0;
  3529   _aborted_termination           = 0;
  3530 #endif // _MARKING_STATS_
  3533 bool CMTask::should_exit_termination() {
  3534   regular_clock_call();
  3535   // This is called when we are in the termination protocol. We should
  3536   // quit if, for some reason, this task wants to abort or the global
  3537   // stack is not empty (this means that we can get work from it).
  3538   return !_cm->mark_stack_empty() || has_aborted();
  3541 void CMTask::reached_limit() {
  3542   assert(_words_scanned >= _words_scanned_limit ||
  3543          _refs_reached >= _refs_reached_limit,
  3544          "shouldn't have been called otherwise");
  3545   regular_clock_call();
  3548 void CMTask::regular_clock_call() {
  3549   if (has_aborted()) return;
  3551   // First, we need to recalculate the words scanned and refs reached
  3552   // limits for the next clock call.
  3553   recalculate_limits();
  3555   // During the regular clock call we do the following:
  3557   // (1) If an overflow has been flagged, then we abort.
  3558   if (_cm->has_overflown()) {
  3559     set_has_aborted();
  3560     return;
  3563   // If we are not concurrent (i.e. we're doing remark) we don't need
  3564   // to check anything else. The other steps are only needed during
  3565   // the concurrent marking phase.
  3566   if (!concurrent()) return;
  3568   // (2) If marking has been aborted for Full GC, then we also abort.
  3569   if (_cm->has_aborted()) {
  3570     set_has_aborted();
  3571     statsOnly( ++_aborted_cm_aborted );
  3572     return;
  3575   double curr_time_ms = os::elapsedVTime() * 1000.0;
  3577   // (3) If marking stats are enabled, then we update the step history.
  3578 #if _MARKING_STATS_
  3579   if (_words_scanned >= _words_scanned_limit) {
  3580     ++_clock_due_to_scanning;
  3582   if (_refs_reached >= _refs_reached_limit) {
  3583     ++_clock_due_to_marking;
  3586   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
  3587   _interval_start_time_ms = curr_time_ms;
  3588   _all_clock_intervals_ms.add(last_interval_ms);
  3590   if (_cm->verbose_medium()) {
  3591       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
  3592                         "scanned = %d%s, refs reached = %d%s",
  3593                         _worker_id, last_interval_ms,
  3594                         _words_scanned,
  3595                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
  3596                         _refs_reached,
  3597                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
  3599 #endif // _MARKING_STATS_
  3601   // (4) We check whether we should yield. If we have to, then we abort.
  3602   if (_cm->should_yield()) {
  3603     // We should yield. To do this we abort the task. The caller is
  3604     // responsible for yielding.
  3605     set_has_aborted();
  3606     statsOnly( ++_aborted_yield );
  3607     return;
  3610   // (5) We check whether we've reached our time quota. If we have,
  3611   // then we abort.
  3612   double elapsed_time_ms = curr_time_ms - _start_time_ms;
  3613   if (elapsed_time_ms > _time_target_ms) {
  3614     set_has_aborted();
  3615     _has_timed_out = true;
  3616     statsOnly( ++_aborted_timed_out );
  3617     return;
  3620   // (6) Finally, we check whether there are enough completed SATB
  3621   // buffers available for processing. If there are, we abort.
  3622   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  3623   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
  3624     if (_cm->verbose_low()) {
  3625       gclog_or_tty->print_cr("[%u] aborting to deal with pending SATB buffers",
  3626                              _worker_id);
  3628     // we do need to process SATB buffers, so we'll abort and restart
  3629     // the marking task to do so
  3630     set_has_aborted();
  3631     statsOnly( ++_aborted_satb );
  3632     return;
  3636 void CMTask::recalculate_limits() {
  3637   _real_words_scanned_limit = _words_scanned + words_scanned_period;
  3638   _words_scanned_limit      = _real_words_scanned_limit;
  3640   _real_refs_reached_limit  = _refs_reached  + refs_reached_period;
  3641   _refs_reached_limit       = _real_refs_reached_limit;
  3644 void CMTask::decrease_limits() {
  3645   // This is called when we believe that we're going to do an infrequent
  3646   // operation which will increase the per-byte scanned cost (i.e. move
  3647   // entries to/from the global stack). It basically tries to decrease the
  3648   // scanning limit so that the clock is called earlier.
  3650   if (_cm->verbose_medium()) {
  3651     gclog_or_tty->print_cr("[%u] decreasing limits", _worker_id);
  3654   _words_scanned_limit = _real_words_scanned_limit -
  3655     3 * words_scanned_period / 4;
  3656   _refs_reached_limit  = _real_refs_reached_limit -
  3657     3 * refs_reached_period / 4;
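// A worked example of the two limit routines above, with a hypothetical
// period of 12K words: after recalculate_limits() the next clock call is
// a full period away; decrease_limits() pulls it in to a quarter period,
// so the clock fires soon after the expensive operation completes.

#include <cassert>
#include <cstddef>

static void limits_example() {
  const size_t words_scanned_period = 12 * 1024;    // assumed period
  size_t words_scanned = 100 * 1024;                // arbitrary progress

  size_t real_limit = words_scanned + words_scanned_period;
  size_t limit      = real_limit - 3 * words_scanned_period / 4;

  assert(real_limit - words_scanned == words_scanned_period);      // 12K away
  assert(limit      - words_scanned == words_scanned_period / 4);  //  3K away
}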
  3660 void CMTask::move_entries_to_global_stack() {
  3661   // local array where we'll store the entries that will be popped
  3662   // from the local queue
  3663   oop buffer[global_stack_transfer_size];
  3665   int n = 0;
  3666   oop obj;
  3667   while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) {
  3668     buffer[n] = obj;
  3669     ++n;
  3672   if (n > 0) {
  3673     // we popped at least one entry from the local queue
  3675     statsOnly( ++_global_transfers_to; _local_pops += n );
  3677     if (!_cm->mark_stack_push(buffer, n)) {
  3678       if (_cm->verbose_low()) {
  3679         gclog_or_tty->print_cr("[%u] aborting due to global stack overflow",
  3680                                _worker_id);
  3682       set_has_aborted();
  3683     } else {
  3684       // the transfer was successful
  3686       if (_cm->verbose_medium()) {
  3687         gclog_or_tty->print_cr("[%u] pushed %d entries to the global stack",
  3688                                _worker_id, n);
  3690       statsOnly( int tmp_size = _cm->mark_stack_size();
  3691                  if (tmp_size > _global_max_size) {
  3692                    _global_max_size = tmp_size;
  3694                  _global_pushes += n );
  3698   // this operation was quite expensive, so decrease the limits
  3699   decrease_limits();
  3702 void CMTask::get_entries_from_global_stack() {
  3703   // local array where we'll store the entries that will be popped
  3704   // from the global stack.
  3705   oop buffer[global_stack_transfer_size];
  3706   int n;
  3707   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
  3708   assert(n <= global_stack_transfer_size,
  3709          "we should not pop more than the given limit");
  3710   if (n > 0) {
  3711     // yes, we did actually pop at least one entry
  3713     statsOnly( ++_global_transfers_from; _global_pops += n );
  3714     if (_cm->verbose_medium()) {
  3715       gclog_or_tty->print_cr("[%u] popped %d entries from the global stack",
  3716                              _worker_id, n);
  3718     for (int i = 0; i < n; ++i) {
  3719       bool success = _task_queue->push(buffer[i]);
  3720       // We only call this when the local queue is empty or under a
  3721       // given target limit. So, we do not expect this push to fail.
  3722       assert(success, "invariant");
  3725     statsOnly( int tmp_size = _task_queue->size();
  3726                if (tmp_size > _local_max_size) {
  3727                  _local_max_size = tmp_size;
  3729                _local_pushes += n );
  3732   // this operation was quite expensive, so decrease the limits
  3733   decrease_limits();
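// Both transfer routines move global_stack_transfer_size entries per lock
// acquisition rather than one at a time, amortizing the synchronization
// cost of the shared stack. A self-contained sketch of that pattern
// (std::mutex stands in for the VM's monitor; the names and the transfer
// size are illustrative):

#include <cstddef>
#include <mutex>
#include <vector>

static const int          kTransferSize = 16;       // assumed chunk size
static std::mutex         g_stack_lock;
static std::vector<void*> g_mark_stack;

// Push a whole chunk under one lock acquisition; a false return models
// global stack overflow, after which the caller must abort and restart.
static bool global_push(void** buf, int n, size_t capacity) {
  std::lock_guard<std::mutex> guard(g_stack_lock);
  if (g_mark_stack.size() + (size_t)n > capacity) {
    return false;
  }
  g_mark_stack.insert(g_mark_stack.end(), buf, buf + n);
  return true;
}

// Pop up to kTransferSize entries under one lock acquisition; returns the
// number actually popped, mirroring mark_stack_pop() above.
static int global_pop(void** buf) {
  std::lock_guard<std::mutex> guard(g_stack_lock);
  int n = 0;
  while (n < kTransferSize && !g_mark_stack.empty()) {
    buf[n++] = g_mark_stack.back();
    g_mark_stack.pop_back();
  }
  return n;
}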
  3736 void CMTask::drain_local_queue(bool partially) {
  3737   if (has_aborted()) return;
  3739   // Decide what the target size is, depending on whether we're going to
  3740   // drain it partially (so that other tasks can steal if they run out
  3741   // of things to do) or totally (at the very end).
  3742   size_t target_size;
  3743   if (partially) {
  3744     target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
  3745   } else {
  3746     target_size = 0;
  3749   if (_task_queue->size() > target_size) {
  3750     if (_cm->verbose_high()) {
  3751       gclog_or_tty->print_cr("[%u] draining local queue, target size = %d",
  3752                              _worker_id, target_size);
  3755     oop obj;
  3756     bool ret = _task_queue->pop_local(obj);
  3757     while (ret) {
  3758       statsOnly( ++_local_pops );
  3760       if (_cm->verbose_high()) {
  3761         gclog_or_tty->print_cr("[%u] popped "PTR_FORMAT, _worker_id,
  3762                                (void*) obj);
  3765       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
  3766       assert(!_g1h->is_on_master_free_list(
  3767                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
  3769       scan_object(obj);
  3771       if (_task_queue->size() <= target_size || has_aborted()) {
  3772         ret = false;
  3773       } else {
  3774         ret = _task_queue->pop_local(obj);
  3778     if (_cm->verbose_high()) {
  3779       gclog_or_tty->print_cr("[%u] drained local queue, size = %d",
  3780                              _worker_id, _task_queue->size());
  3785 void CMTask::drain_global_stack(bool partially) {
  3786   if (has_aborted()) return;
  3788   // We have a policy to drain the local queue before we attempt to
  3789   // drain the global stack.
  3790   assert(partially || _task_queue->size() == 0, "invariant");
  3792   // Decide what the target size is, depending on whether we're going to
  3793   // drain it partially (so that other tasks can steal if they run out
  3794   // of things to do) or totally (at the very end).  Notice that,
  3795   // because we move entries from the global stack in chunks or
  3796   // because another task might be doing the same, we might in fact
  3797   // drop below the target. But this is not a problem.
  3798   size_t target_size;
  3799   if (partially) {
  3800     target_size = _cm->partial_mark_stack_size_target();
  3801   } else {
  3802     target_size = 0;
  3805   if (_cm->mark_stack_size() > target_size) {
  3806     if (_cm->verbose_low()) {
  3807       gclog_or_tty->print_cr("[%u] draining global_stack, target size %d",
  3808                              _worker_id, target_size);
  3811     while (!has_aborted() && _cm->mark_stack_size() > target_size) {
  3812       get_entries_from_global_stack();
  3813       drain_local_queue(partially);
  3816     if (_cm->verbose_low()) {
  3817       gclog_or_tty->print_cr("[%u] drained global stack, size = %d",
  3818                              _worker_id, _cm->mark_stack_size());
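// The shape shared by both drain routines: pop and scan until the size
// target is reached, where a partial drain's non-zero target deliberately
// leaves entries behind for other tasks to steal or pop. A compact
// stand-in (the queue type and partial_target parameter are illustrative):

#include <cstddef>
#include <deque>

template <typename Scan>
static void drain(std::deque<void*>& queue, bool partially,
                  size_t partial_target, Scan scan_object) {
  size_t target = partially ? partial_target : 0;
  while (queue.size() > target) {
    void* obj = queue.front();
    queue.pop_front();
    scan_object(obj);  // scanning may push new entries; size is re-checked
  }
}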
  3823 // The SATB queue set makes several assumptions about whether to call the
  3824 // par or non-par versions of its methods. This is why some of the code is
  3825 // replicated. We should really get rid of the single-threaded version
  3826 // of the code to simplify things.
  3827 void CMTask::drain_satb_buffers() {
  3828   if (has_aborted()) return;
  3830   // We set this so that the regular clock knows that we're in the
  3831   // middle of draining buffers and doesn't set the abort flag when it
  3832   // notices that SATB buffers are available for draining. It'd be
  3833   // very counterproductive if it did that. :-)
  3834   _draining_satb_buffers = true;
  3836   CMObjectClosure oc(this);
  3837   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  3838   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3839     satb_mq_set.set_par_closure(_worker_id, &oc);
  3840   } else {
  3841     satb_mq_set.set_closure(&oc);
  3844   // This keeps claiming and applying the closure to completed buffers
  3845   // until we run out of buffers or we need to abort.
  3846   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3847     while (!has_aborted() &&
  3848            satb_mq_set.par_apply_closure_to_completed_buffer(_worker_id)) {
  3849       if (_cm->verbose_medium()) {
  3850         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
  3852       statsOnly( ++_satb_buffers_processed );
  3853       regular_clock_call();
  3855   } else {
  3856     while (!has_aborted() &&
  3857            satb_mq_set.apply_closure_to_completed_buffer()) {
  3858       if (_cm->verbose_medium()) {
  3859         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
  3861       statsOnly( ++_satb_buffers_processed );
  3862       regular_clock_call();
  3866   if (!concurrent() && !has_aborted()) {
  3867     // We should only do this during remark.
  3868     if (G1CollectedHeap::use_parallel_gc_threads()) {
  3869       satb_mq_set.par_iterate_closure_all_threads(_worker_id);
  3870     } else {
  3871       satb_mq_set.iterate_closure_all_threads();
  3875   _draining_satb_buffers = false;
  3877   assert(has_aborted() ||
  3878          concurrent() ||
  3879          satb_mq_set.completed_buffers_num() == 0, "invariant");
  3881   if (G1CollectedHeap::use_parallel_gc_threads()) {
  3882     satb_mq_set.set_par_closure(_worker_id, NULL);
  3883   } else {
  3884     satb_mq_set.set_closure(NULL);
  3887   // again, this was a potentially expensive operation, so decrease the
  3888   // limits to get the regular clock call early
  3889   decrease_limits();
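// For context, what fills these buffers: under snapshot-at-the-beginning
// marking, a pre-write barrier records the old value of a reference field
// while marking is active, so no object reachable at marking start can be
// hidden from the marker. This is the concept only, not HotSpot's actual
// barrier code; all names below are illustrative.

#include <cstddef>
#include <vector>

static bool               g_marking_active = false;
static std::vector<void*> g_satb_buffer;   // per-thread in the real design

static void pre_write_barrier(void** field) {
  if (g_marking_active) {
    void* old_val = *field;
    if (old_val != NULL) {
      g_satb_buffer.push_back(old_val);  // enqueue old value for the marker
    }
  }
}

static void reference_store(void** field, void* new_val) {
  pre_write_barrier(field);  // runs *before* the store, hence "pre-write"
  *field = new_val;
}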
  3892 void CMTask::print_stats() {
  3893   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
  3894                          _worker_id, _calls);
  3895   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
  3896                          _elapsed_time_ms, _termination_time_ms);
  3897   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
  3898                          _step_times_ms.num(), _step_times_ms.avg(),
  3899                          _step_times_ms.sd());
  3900   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
  3901                          _step_times_ms.maximum(), _step_times_ms.sum());
  3903 #if _MARKING_STATS_
  3904   gclog_or_tty->print_cr("  Clock Intervals (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
  3905                          _all_clock_intervals_ms.num(), _all_clock_intervals_ms.avg(),
  3906                          _all_clock_intervals_ms.sd());
  3907   gclog_or_tty->print_cr("                         max = %1.2lfms, total = %1.2lfms",
  3908                          _all_clock_intervals_ms.maximum(),
  3909                          _all_clock_intervals_ms.sum());
  3910   gclog_or_tty->print_cr("  Clock Causes (cum): scanning = %d, marking = %d",
  3911                          _clock_due_to_scanning, _clock_due_to_marking);
  3912   gclog_or_tty->print_cr("  Objects: scanned = %d, found on the bitmap = %d",
  3913                          _objs_scanned, _objs_found_on_bitmap);
  3914   gclog_or_tty->print_cr("  Local Queue:  pushes = %d, pops = %d, max size = %d",
  3915                          _local_pushes, _local_pops, _local_max_size);
  3916   gclog_or_tty->print_cr("  Global Stack: pushes = %d, pops = %d, max size = %d",
  3917                          _global_pushes, _global_pops, _global_max_size);
  3918   gclog_or_tty->print_cr("                transfers to = %d, transfers from = %d",
  3919                          _global_transfers_to, _global_transfers_from);
  3920   gclog_or_tty->print_cr("  Regions: claimed = %d", _regions_claimed);
  3921   gclog_or_tty->print_cr("  SATB buffers: processed = %d", _satb_buffers_processed);
  3922   gclog_or_tty->print_cr("  Steals: attempts = %d, successes = %d",
  3923                          _steal_attempts, _steals);
  3924   gclog_or_tty->print_cr("  Aborted: %d, due to", _aborted);
  3925   gclog_or_tty->print_cr("    overflow: %d, global abort: %d, yield: %d",
  3926                          _aborted_overflow, _aborted_cm_aborted, _aborted_yield);
  3927   gclog_or_tty->print_cr("    time out: %d, SATB: %d, termination: %d",
  3928                          _aborted_timed_out, _aborted_satb, _aborted_termination);
  3929 #endif // _MARKING_STATS_
  3932 /*****************************************************************************
  3934     The do_marking_step(time_target_ms, ...) method is the building
  3935     block of the parallel marking framework. It can be called in parallel
  3936     with other invocations of do_marking_step() on different tasks
  3937     (but only one per task, obviously) and concurrently with the
  3938     mutator threads, or during remark, hence it eliminates the need
  3939     for two versions of the code. When called during remark, it will
  3940     pick up from where the task left off during the concurrent marking
  3941     phase. Interestingly, tasks are also claimable during evacuation
  3942     pauses, since do_marking_step() ensures that it aborts before
  3943     it needs to yield.
  3945     The data structures that it uses to do marking work are the
  3946     following:
  3948       (1) Marking Bitmap. If there are gray objects that appear only
  3949       on the bitmap (this happens either when dealing with an overflow
  3950       or when the initial marking phase has simply marked the roots
  3951       and didn't push them on the stack), then tasks claim heap
  3952       regions whose bitmap they then scan to find gray objects. A
  3953       global finger indicates where the end of the last claimed region
  3954       is. A local finger indicates how far into the region a task has
  3955       scanned. The two fingers are used to determine how to gray an
  3956       object (i.e. whether simply marking it is OK, as it will be
  3957       visited by a task in the future, or whether it also needs to be
  3958       pushed on a stack).
  3960       (2) Local Queue. The local queue of the task which is accessed
  3961       reasonably efficiently by the task. Other tasks can steal from
  3962       it when they run out of work. Throughout the marking phase, a
  3963       task attempts to keep its local queue short but not totally
  3964       empty, so that entries are available for stealing by other
  3965       tasks. Only when there is no more work will a task totally
  3966       drain its local queue.
  3968       (3) Global Mark Stack. This handles local queue overflow. During
  3969       marking only sets of entries are moved between it and the local
  3970       queues, as access to it requires a mutex and more fine-grained
  3971       interaction with it which might cause contention. If it
  3972       overflows, then the marking phase should restart and iterate
  3973       over the bitmap to identify gray objects. Throughout the marking
  3974       phase, tasks attempt to keep the global mark stack at a small
  3975       length but not totally empty, so that entries are available for
  3976       popping by other tasks. Only when there is no more work will
  3977       tasks totally drain the global mark stack.
  3979       (4) SATB Buffer Queue. This is where completed SATB buffers are
  3980       made available. Buffers are regularly removed from this queue
  3981       and scanned for roots, so that the queue doesn't get too
  3982       long. During remark, all completed buffers are processed, as
  3983       well as the filled-in parts of any uncompleted buffers.
  3985     The do_marking_step() method tries to abort when the time target
  3986     has been reached. There are a few other cases when the
  3987     do_marking_step() method also aborts:
  3989       (1) When the marking phase has been aborted (after a Full GC).
  3991       (2) When a global overflow (on the global stack) has been
  3992       triggered. Before the task aborts, it will actually sync up with
  3993       the other tasks to ensure that all the marking data structures
  3994     (local queues, stacks, fingers etc.) are re-initialized so that
  3995       when do_marking_step() completes, the marking phase can
  3996       immediately restart.
  3998       (3) When enough completed SATB buffers are available. The
  3999       do_marking_step() method only tries to drain SATB buffers right
  4000       at the beginning. So, if enough buffers are available, the
  4001       marking step aborts and the SATB buffers are processed at
  4002       the beginning of the next invocation.
  4004       (4) To yield. When we have to yield, we abort and yield
  4005       right at the end of do_marking_step(). This saves us from a lot
  4006       of hassle as, by yielding, we might allow a Full GC. If this
  4007       happens then objects will be compacted underneath our feet, the
  4008       heap might shrink, etc. We save checking for this by just
  4009       aborting and doing the yield right at the end.
  4011     From the above it follows that the do_marking_step() method should
  4012     be called in a loop (or, otherwise, regularly) until it completes.
  4014     If a marking step completes without its has_aborted() flag being
  4015     true, it means it has completed the current marking phase (and
  4016     also all other marking tasks have done so and have all synced up).
  4018     A method called regular_clock_call() is invoked "regularly" (in
  4019     sub ms intervals) throughout marking. It is this clock method that
  4020     checks all the abort conditions which were mentioned above and
  4021     decides when the task should abort. A work-based scheme is used to
  4022     trigger this clock method: when the number of object words the
  4023     marking phase has scanned or the number of references the marking
  4024     phase has visited reach a given limit. Additional invocations of
  4025     the clock method have been planted in a few other strategic places
  4026     too. The initial reason for the clock method was to avoid calling
  4027     vtime too regularly, as it is quite expensive. So, once it was in
  4028     place, it was natural to piggy-back all the other conditions on it
  4029     too and not constantly check them throughout the code.
  4031     If do_termination is true then do_marking_step will enter its
  4032     termination protocol.
  4034     The value of is_serial must be true when do_marking_step is being
  4035     called serially (i.e. by the VMThread) and do_marking_step should
  4036     skip any synchronization in the termination and overflow code.
  4037     Examples include the serial remark code and the serial reference
  4038     processing closures.
  4040     The value of is_serial must be false when do_marking_step is
  4041     being called by any of the worker threads in a work gang.
  4042     Examples include the concurrent marking code (CMMarkingTask),
  4043     the MT remark code, and the MT reference processing closures.
  4045  *****************************************************************************/
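// The retry loop prescribed above, in miniature. Task is a toy stand-in
// for CMTask that "aborts" a couple of times (time-out, yield, SATB,
// overflow...) before completing; the real callers are the concurrent
// marking task and the remark/reference-processing code.

struct Task {
  int  aborts_left;
  bool aborted;
  void do_marking_step(double /* time_target_ms */,
                       bool   /* do_termination */,
                       bool   /* is_serial */) {
    aborted = (aborts_left-- > 0);
  }
  bool has_aborted() const { return aborted; }
};

static void run_marking(Task& task) {
  do {
    task.do_marking_step(10.0, true /* do_termination */,
                         false /* is_serial */);
    // On abort the caller yields and/or syncs up as needed, then retries.
  } while (task.has_aborted());
}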
  4047 void CMTask::do_marking_step(double time_target_ms,
  4048                              bool do_termination,
  4049                              bool is_serial) {
  4050   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
  4051   assert(concurrent() == _cm->concurrent(), "they should be the same");
  4053   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
  4054   assert(_task_queues != NULL, "invariant");
  4055   assert(_task_queue != NULL, "invariant");
  4056   assert(_task_queues->queue(_worker_id) == _task_queue, "invariant");
  4058   assert(!_claimed,
  4059          "only one thread should claim this task at any one time");
  4061   // OK, this doesn't safeguard against all possible scenarios, as it is
  4062   // possible for two threads to set the _claimed flag at the same
  4063   // time. But it is only for debugging purposes anyway and it will
  4064   // catch most problems.
  4065   _claimed = true;
  4067   _start_time_ms = os::elapsedVTime() * 1000.0;
  4068   statsOnly( _interval_start_time_ms = _start_time_ms );
  4070   // If do_stealing is true then do_marking_step will attempt to
  4071   // steal work from the other CMTasks. It only makes sense to
  4072   // enable stealing when the termination protocol is enabled
  4073   // and do_marking_step() is not being called serially.
  4074   bool do_stealing = do_termination && !is_serial;
  4076   double diff_prediction_ms =
  4077     g1_policy->get_new_prediction(&_marking_step_diffs_ms);
  4078   _time_target_ms = time_target_ms - diff_prediction_ms;
  4080   // set up the variables that are used in the work-based scheme to
  4081   // call the regular clock method
  4082   _words_scanned = 0;
  4083   _refs_reached  = 0;
  4084   recalculate_limits();
  4086   // clear all flags
  4087   clear_has_aborted();
  4088   _has_timed_out = false;
  4089   _draining_satb_buffers = false;
  4091   ++_calls;
  4093   if (_cm->verbose_low()) {
  4094     gclog_or_tty->print_cr("[%u] >>>>>>>>>> START, call = %d, "
  4095                            "target = %1.2lfms >>>>>>>>>>",
  4096                            _worker_id, _calls, _time_target_ms);
  4099   // Set up the bitmap and oop closures. Anything that uses them is
  4100   // eventually called from this method, so it is OK to allocate these
  4101   // on the stack.
  4102   CMBitMapClosure bitmap_closure(this, _cm, _nextMarkBitMap);
  4103   G1CMOopClosure  cm_oop_closure(_g1h, _cm, this);
  4104   set_cm_oop_closure(&cm_oop_closure);
  4106   if (_cm->has_overflown()) {
  4107     // This can happen if the mark stack overflows during a GC pause
  4108     // and this task, after a yield point, restarts. We have to abort
  4109     // as we need to get into the overflow protocol which happens
  4110     // right at the end of this task.
  4111     set_has_aborted();
  4112   }
  4114   // First drain any available SATB buffers. After this, we will not
  4115   // look at SATB buffers before the next invocation of this method.
  4116   // If enough completed SATB buffers are queued up, the regular clock
  4117   // will abort this task so that it restarts.
  4118   drain_satb_buffers();
  4119   // ...then partially drain the local queue and the global stack
  4120   drain_local_queue(true);
  4121   drain_global_stack(true);
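         // Note (behavior inferred from the drain methods): passing "true"
         // requests only a partial drain, down to an internal threshold, so the
         // task keeps some work queued; the calls near the end of this method
         // pass "false" to drain both structures completely.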
  4123   do {
  4124     if (!has_aborted() && _curr_region != NULL) {
  4125       // This means that we're already holding on to a region.
  4126       assert(_finger != NULL, "if region is not NULL, then the finger "
  4127              "should not be NULL either");
  4129       // We might have restarted this task after an evacuation pause
  4130       // which might have evacuated the region we're holding on to
  4131       // underneath our feet. Let's read its limit again to make sure
  4132       // that we do not iterate over a region of the heap that
  4133       // contains garbage (update_region_limit() will also move
  4134       // _finger to the start of the region if it is found empty).
  4135       update_region_limit();
  4136       // We will start from _finger not from the start of the region,
  4137       // as we might be restarting this task after aborting half-way
  4138       // through scanning this region. In this case, _finger points to
  4139       // the address where we last found a marked object. If this is a
  4140       // fresh region, _finger points to start().
  4141       MemRegion mr = MemRegion(_finger, _region_limit);
  4143       if (_cm->verbose_low()) {
  4144         gclog_or_tty->print_cr("[%u] we're scanning part "
  4145                                "["PTR_FORMAT", "PTR_FORMAT") "
  4146                                "of region "HR_FORMAT,
  4147                                _worker_id, _finger, _region_limit,
  4148                                HR_FORMAT_PARAMS(_curr_region));
  4149       }
  4151       assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
  4152              "humongous regions should go around loop once only");
  4154       // Some special cases:
  4155       // If the memory region is empty, we can just give up the region.
  4156       // If the current region is humongous then we only need to check
  4157       // the bitmap for the bit associated with the start of the object,
  4158       // scan the object if it's live, and give up the region.
  4159       // Otherwise, let's iterate over the bitmap of the part of the region
  4160       // that is left.
  4161       // If the iteration is successful, give up the region.
  4162       if (mr.is_empty()) {
  4163         giveup_current_region();
  4164         regular_clock_call();
  4165       } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
  4166         if (_nextMarkBitMap->isMarked(mr.start())) {
  4167           // The object is marked - apply the closure
  4168           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
  4169           bitmap_closure.do_bit(offset);
  4170         }
  4171         // Even if this task aborted while scanning the humongous object
  4172         // we can (and should) give up the current region.
  4173         giveup_current_region();
  4174         regular_clock_call();
  4175       } else if (_nextMarkBitMap->iterate(&bitmap_closure, mr)) {
  4176         giveup_current_region();
  4177         regular_clock_call();
  4178       } else {
  4179         assert(has_aborted(), "currently the only way to do so");
  4180         // The only way to abort the bitmap iteration is to return
  4181         // false from the do_bit() method. However, inside the
  4182         // do_bit() method we move the _finger to point to the
  4183         // object currently being looked at. So, if we bail out, we
  4184         // have definitely set _finger to something non-null.
  4185         assert(_finger != NULL, "invariant");
  4187         // Region iteration was actually aborted. So now _finger
  4188         // points to the address of the object we last scanned. If we
  4189         // leave it there, when we restart this task, we will rescan
  4190         // the object. It is easy to avoid this. We move the finger by
  4191         // enough to point to the next possible object header (the
  4192         // bitmap knows by how much we need to move it as it knows its
  4193         // granularity).
  4194         assert(_finger < _region_limit, "invariant");
  4195         HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
  4196         // Check if bitmap iteration was aborted while scanning the last object
  4197         if (new_finger >= _region_limit) {
  4198           giveup_current_region();
  4199         } else {
  4200           move_finger_to(new_finger);
  4201         }
  4202       }
  4203     }
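           // A simplified sketch (names match this file, asserts and drains
           // elided) of the abort contract described above: the bitmap closure
           // records the finger before scanning, and aborts iterate() by
           // returning false.
           //
           //   bool CMBitMapClosure::do_bit(BitMap::idx_t offset) {
           //     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
           //     _task->move_finger_to(addr);   // remember where we are first
           //     _task->scan_object(oop(addr)); // may set the has_aborted flag
           //     return !_task->has_aborted();  // false terminates the iteration
           //   }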
  4204     // At this point we have either completed iterating over the
  4205     // region we were holding on to, or we have aborted.
  4207     // We then partially drain the local queue and the global stack.
  4208     // (Do we really need this?)
  4209     drain_local_queue(true);
  4210     drain_global_stack(true);
  4212     // Read the note on the claim_region() method on why it might
  4213     // return NULL with potentially more regions available for
  4214     // claiming and why we have to check out_of_regions() to determine
  4215     // whether we're done or not.
  4216     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
  4217       // We are going to try to claim a new region. We should have
  4218       // given up on the previous one.
  4219       // Separated the asserts so that we know which one fires.
  4220       assert(_curr_region  == NULL, "invariant");
  4221       assert(_finger       == NULL, "invariant");
  4222       assert(_region_limit == NULL, "invariant");
  4223       if (_cm->verbose_low()) {
  4224         gclog_or_tty->print_cr("[%u] trying to claim a new region", _worker_id);
  4225       }
  4226       HeapRegion* claimed_region = _cm->claim_region(_worker_id);
  4227       if (claimed_region != NULL) {
  4228         // Yes, we managed to claim one
  4229         statsOnly( ++_regions_claimed );
  4231         if (_cm->verbose_low()) {
  4232           gclog_or_tty->print_cr("[%u] we successfully claimed "
  4233                                  "region "PTR_FORMAT,
  4234                                  _worker_id, claimed_region);
  4235         }
  4237         setup_for_region(claimed_region);
  4238         assert(_curr_region == claimed_region, "invariant");
  4239       }
  4240       // It is important to call the regular clock here. It might take
  4241       // a while to claim a region if, for example, we hit a large
  4242       // block of empty regions. So we need to call the regular clock
  4243       // method once round the loop to make sure it's called
  4244       // frequently enough.
  4245       regular_clock_call();
  4246     }
  4248     if (!has_aborted() && _curr_region == NULL) {
  4249       assert(_cm->out_of_regions(),
  4250              "at this point we should be out of regions");
  4251     }
  4252   } while ( _curr_region != NULL && !has_aborted());
  4254   if (!has_aborted()) {
  4255     // We cannot check whether the global stack is empty, since other
  4256     // tasks might be pushing objects to it concurrently.
  4257     assert(_cm->out_of_regions(),
  4258            "at this point we should be out of regions");
  4260     if (_cm->verbose_low()) {
  4261       gclog_or_tty->print_cr("[%u] all regions claimed", _worker_id);
  4262     }
  4264     // Try to reduce the number of available SATB buffers so that
  4265     // remark has less work to do.
  4266     drain_satb_buffers();
  4267   }
  4269   // Since we've done everything else, we can now totally drain the
  4270   // local queue and global stack.
  4271   drain_local_queue(false);
  4272   drain_global_stack(false);
  4274   // Attempt to steal work from other tasks' queues.
  4275   if (do_stealing && !has_aborted()) {
  4276     // We have not aborted. This means that we have finished all that
  4277     // we could. Let's try to do some stealing...
  4279     // We cannot check whether the global stack is empty, since other
  4280     // tasks might be pushing objects to it concurrently.
  4281     assert(_cm->out_of_regions() && _task_queue->size() == 0,
  4282            "only way to reach here");
  4284     if (_cm->verbose_low()) {
  4285       gclog_or_tty->print_cr("[%u] starting to steal", _worker_id);
  4286     }
  4288     while (!has_aborted()) {
  4289       oop obj;
  4290       statsOnly( ++_steal_attempts );
  4292       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
  4293         if (_cm->verbose_medium()) {
  4294           gclog_or_tty->print_cr("[%u] stolen "PTR_FORMAT" successfully",
  4295                                  _worker_id, (void*) obj);
  4296         }
  4298         statsOnly( ++_steals );
  4300         assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
  4301                "any stolen object should be marked");
  4302         scan_object(obj);
  4304         // And since we're towards the end, let's totally drain the
  4305         // local queue and global stack.
  4306         drain_local_queue(false);
  4307         drain_global_stack(false);
  4308       } else {
  4309         break;
  4310       }
  4311     }
  4312   }
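         // For reference, a sketch of the expected shape of try_stealing()
         // (defined elsewhere, not in this file): it forwards to the shared
         // task queue set, using the hash seed to randomize which victim
         // queue is probed first.
         //
         //   bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
         //     return _task_queues->steal(worker_id, hash_seed, obj);
         //   }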
  4314   // If we are about to wrap up and go into termination, check if we
  4315   // should raise the overflow flag.
  4316   if (do_termination && !has_aborted()) {
  4317     if (_cm->force_overflow()->should_force()) {
  4318       _cm->set_has_overflown();
  4319       regular_clock_call();
  4320     }
  4321   }
  4323   // We still haven't aborted. Now, let's try to get into the
  4324   // termination protocol.
  4325   if (do_termination && !has_aborted()) {
  4326     // We cannot check whether the global stack is empty, since other
  4327     // tasks might be concurrently pushing objects on it.
  4328     // Separated the asserts so that we know which one fires.
  4329     assert(_cm->out_of_regions(), "only way to reach here");
  4330     assert(_task_queue->size() == 0, "only way to reach here");
  4332     if (_cm->verbose_low()) {
  4333       gclog_or_tty->print_cr("[%u] starting termination protocol", _worker_id);
  4334     }
  4336     _termination_start_time_ms = os::elapsedVTime() * 1000.0;
  4338     // The CMTask class also extends the TerminatorTerminator class,
  4339     // hence its should_exit_termination() method will also decide
  4340     // whether to exit the termination protocol or not.
  4341     bool finished = (is_serial ||
  4342                      _cm->terminator()->offer_termination(this));
  4343     double termination_end_time_ms = os::elapsedVTime() * 1000.0;
  4344     _termination_time_ms +=
  4345       termination_end_time_ms - _termination_start_time_ms;
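           // Semantics, as used here: offer_termination() parks this worker in
           // the terminator until either every task has offered termination
           // (it returns true and we are globally done) or our
           // should_exit_termination() asks to leave early (it returns false
           // and we abort and retry). A serial caller skips the offer
           // entirely: with one thread there is nobody to wait for.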
  4347     if (finished) {
  4348       // We're all done.
  4350       if (_worker_id == 0) {
  4351         // let's allow task 0 to do this
  4352         if (concurrent()) {
  4353           assert(_cm->concurrent_marking_in_progress(), "invariant");
  4354           // we need to set this to false before the next
  4355           // safepoint. This way we ensure that the marking phase
  4356           // doesn't observe any more heap expansions.
  4357           _cm->clear_concurrent_marking_in_progress();
  4358         }
  4359       }
  4361       // We can now guarantee that the global stack is empty, since
  4362       // all other tasks have finished. We separated the guarantees so
  4363       // that, if a condition is false, we can immediately find out
  4364       // which one.
  4365       guarantee(_cm->out_of_regions(), "only way to reach here");
  4366       guarantee(_cm->mark_stack_empty(), "only way to reach here");
  4367       guarantee(_task_queue->size() == 0, "only way to reach here");
  4368       guarantee(!_cm->has_overflown(), "only way to reach here");
  4369       guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
  4371       if (_cm->verbose_low()) {
  4372         gclog_or_tty->print_cr("[%u] all tasks terminated", _worker_id);
  4373       }
  4374     } else {
  4375       // Apparently there's more work to do. Let's abort this task. Its
  4376       // caller will restart it and we can hopefully find more things to do.
  4378       if (_cm->verbose_low()) {
  4379         gclog_or_tty->print_cr("[%u] apparently there is more work to do",
  4380                                _worker_id);
  4381       }
  4383       set_has_aborted();
  4384       statsOnly( ++_aborted_termination );
  4385     }
  4386   }
  4388   // Mainly for debugging purposes to make sure that a pointer to the
  4389   // closure which was statically allocated in this frame doesn't
  4390   // escape it by accident.
  4391   set_cm_oop_closure(NULL);
  4392   double end_time_ms = os::elapsedVTime() * 1000.0;
  4393   double elapsed_time_ms = end_time_ms - _start_time_ms;
  4394   // Update the step history.
  4395   _step_times_ms.add(elapsed_time_ms);
  4397   if (has_aborted()) {
  4398     // The task was aborted for some reason.
  4400     statsOnly( ++_aborted );
  4402     if (_has_timed_out) {
  4403       double diff_ms = elapsed_time_ms - _time_target_ms;
  4404       // Keep statistics of how well we did with respect to hitting
  4405       // our target only if we actually timed out (if we aborted for
  4406       // other reasons, then the results might get skewed).
  4407       _marking_step_diffs_ms.add(diff_ms);
  4408     }
  4410     if (_cm->has_overflown()) {
  4411       // This is the interesting one. We aborted because a global
  4412       // overflow was raised. This means we have to restart the
  4413       // marking phase and start iterating over regions. However, in
  4414       // order to do this we have to make sure that all tasks stop
  4415       // what they are doing and re-initialise in a safe manner. We
  4416       // will achieve this with the use of two barrier sync points.
  4418       if (_cm->verbose_low()) {
  4419         gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
  4420       }
  4422       if (!is_serial) {
  4423         // We only need to enter the sync barrier if being called
  4424         // from a parallel context
  4425         _cm->enter_first_sync_barrier(_worker_id);
  4427         // When we exit this sync barrier we know that all tasks have
  4428         // stopped doing marking work. So, it's now safe to
  4429         // re-initialise our data structures. At the end of this method,
  4430         // task 0 will clear the global data structures.
  4431       }
  4433       statsOnly( ++_aborted_overflow );
  4435       // We clear the local state of this task...
  4436       clear_region_fields();
  4438       if (!is_serial) {
  4439         // ...and enter the second barrier.
  4440         _cm->enter_second_sync_barrier(_worker_id);
  4441       }
  4442       // At this point, if we're in the concurrent phase of
  4443       // marking, everything has been re-initialized and we're
  4444       // ready to restart.
  4445     }
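         // A minimal sketch of the caller side (assumed shape; the actual
         // loops live in the concurrent marking and remark tasks): an aborted
         // step is simply re-issued until it finishes or marking itself is
         // aborted.
         //
         //   do {
         //     task->do_marking_step(target_ms,
         //                           true  /* do_termination */,
         //                           false /* is_serial */);
         //   } while (task->has_aborted() && !cm->has_aborted());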
  4447     if (_cm->verbose_low()) {
  4448       gclog_or_tty->print_cr("[%u] <<<<<<<<<< ABORTING, target = %1.2lfms, "
  4449                              "elapsed = %1.2lfms <<<<<<<<<<",
  4450                              _worker_id, _time_target_ms, elapsed_time_ms);
  4451       if (_cm->has_aborted()) {
  4452         gclog_or_tty->print_cr("[%u] ========== MARKING ABORTED ==========",
  4453                                _worker_id);
  4454       }
  4455     }
  4456   } else {
  4457     if (_cm->verbose_low()) {
  4458       gclog_or_tty->print_cr("[%u] <<<<<<<<<< FINISHED, target = %1.2lfms, "
  4459                              "elapsed = %1.2lfms <<<<<<<<<<",
  4460                              _worker_id, _time_target_ms, elapsed_time_ms);
  4461     }
  4462   }
  4464   _claimed = false;
  4465 }
  4467 CMTask::CMTask(uint worker_id,
  4468                ConcurrentMark* cm,
  4469                size_t* marked_bytes,
  4470                BitMap* card_bm,
  4471                CMTaskQueue* task_queue,
  4472                CMTaskQueueSet* task_queues)
  4473   : _g1h(G1CollectedHeap::heap()),
  4474     _worker_id(worker_id), _cm(cm),
  4475     _claimed(false),
  4476     _nextMarkBitMap(NULL), _hash_seed(17),
  4477     _task_queue(task_queue),
  4478     _task_queues(task_queues),
  4479     _cm_oop_closure(NULL),
  4480     _marked_bytes_array(marked_bytes),
  4481     _card_bm(card_bm) {
  4482   guarantee(task_queue != NULL, "invariant");
  4483   guarantee(task_queues != NULL, "invariant");
  4485   statsOnly( _clock_due_to_scanning = 0;
  4486              _clock_due_to_marking  = 0 );
  4488   _marking_step_diffs_ms.add(0.5);
  4489 }
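       // Note (rationale inferred, not stated in the source): seeding the diff
       // history with 0.5ms gives get_new_prediction() a non-empty sample set
       // on the very first call to do_marking_step(), so the first time target
       // is already trimmed slightly rather than not at all.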
  4491 // These are formatting macros that are used below to ensure
  4492 // consistent formatting. The *_H_* versions are used to format the
  4493 // header for a particular value and they should be kept consistent
  4494 // with the corresponding macro. Also note that most of the macros add
  4495 // the necessary white space (as a prefix) which makes them a bit
  4496 // easier to compose.
  4498 // All the output lines are prefixed with this string to be able to
  4499 // identify them easily in a large log file.
  4500 #define G1PPRL_LINE_PREFIX            "###"
  4502 #define G1PPRL_ADDR_BASE_FORMAT    " "PTR_FORMAT"-"PTR_FORMAT
  4503 #ifdef _LP64
  4504 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
  4505 #else // _LP64
  4506 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
  4507 #endif // _LP64
  4509 // For per-region info
  4510 #define G1PPRL_TYPE_FORMAT            "   %-4s"
  4511 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
  4512 #define G1PPRL_BYTE_FORMAT            "  "SIZE_FORMAT_W(9)
  4513 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
  4514 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
  4515 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
  4517 // For summary info
  4518 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  "tag":"G1PPRL_ADDR_BASE_FORMAT
  4519 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  "tag": "SIZE_FORMAT
  4520 #define G1PPRL_SUM_MB_FORMAT(tag)      "  "tag": %1.2f MB"
  4521 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%"
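       // For example, adjacent string literals concatenate, so
       //
       //   _out->print_cr(G1PPRL_LINE_PREFIX
       //                  G1PPRL_SUM_MB_FORMAT("capacity"),
       //                  bytes_to_mb(_total_capacity_bytes));
       //
       // expands to the single format string "###  capacity: %1.2f MB",
       // matching one double argument.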
  4523 G1PrintRegionLivenessInfoClosure::
  4524 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
  4525   : _out(out),
  4526     _total_used_bytes(0), _total_capacity_bytes(0),
  4527     _total_prev_live_bytes(0), _total_next_live_bytes(0),
  4528     _hum_used_bytes(0), _hum_capacity_bytes(0),
  4529     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
  4530     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
  4531   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  4532   MemRegion g1_committed = g1h->g1_committed();
  4533   MemRegion g1_reserved = g1h->g1_reserved();
  4534   double now = os::elapsedTime();
  4536   // Print the header of the output.
  4537   _out->cr();
  4538   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
  4539   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
  4540                  G1PPRL_SUM_ADDR_FORMAT("committed")
  4541                  G1PPRL_SUM_ADDR_FORMAT("reserved")
  4542                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
  4543                  g1_committed.start(), g1_committed.end(),
  4544                  g1_reserved.start(), g1_reserved.end(),
  4545                  HeapRegion::GrainBytes);
  4546   _out->print_cr(G1PPRL_LINE_PREFIX);
  4547   _out->print_cr(G1PPRL_LINE_PREFIX
  4548                 G1PPRL_TYPE_H_FORMAT
  4549                 G1PPRL_ADDR_BASE_H_FORMAT
  4550                 G1PPRL_BYTE_H_FORMAT
  4551                 G1PPRL_BYTE_H_FORMAT
  4552                 G1PPRL_BYTE_H_FORMAT
  4553                 G1PPRL_DOUBLE_H_FORMAT
  4554                 G1PPRL_BYTE_H_FORMAT
  4555                 G1PPRL_BYTE_H_FORMAT,
  4556                 "type", "address-range",
  4557                 "used", "prev-live", "next-live", "gc-eff",
  4558                 "remset", "code-roots");
  4559   _out->print_cr(G1PPRL_LINE_PREFIX
  4560                 G1PPRL_TYPE_H_FORMAT
  4561                 G1PPRL_ADDR_BASE_H_FORMAT
  4562                 G1PPRL_BYTE_H_FORMAT
  4563                 G1PPRL_BYTE_H_FORMAT
  4564                 G1PPRL_BYTE_H_FORMAT
  4565                 G1PPRL_DOUBLE_H_FORMAT
  4566                 G1PPRL_BYTE_H_FORMAT
  4567                 G1PPRL_BYTE_H_FORMAT,
  4568                 "", "",
  4569                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
  4570                 "(bytes)", "(bytes)");
  4571 }
  4573 // It takes a pointer to one of the _hum_* fields, deduces the
  4574 // corresponding value for a region in a humongous region series
  4575 // (either the region size, or what's left if the _hum_* field is
  4576 // smaller than the region size), and updates the _hum_* field accordingly.
  4577 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
  4578   size_t bytes = 0;
  4579   // The > 0 check is to deal with the prev and next live bytes which
  4580   // could be 0.
  4581   if (*hum_bytes > 0) {
  4582     bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
  4583     *hum_bytes -= bytes;
  4584   }
  4585   return bytes;
  4586 }
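       // Worked example (illustrative, with 1MB regions): a humongous series
       // using 2.5MB is recorded on the "starts humongous" region as
       // _hum_used_bytes = 2621440; three successive calls on that field then
       // return 1048576, 1048576 and 524288, leaving it at zero.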
  4588 // It deduces the values for a region in a humongous region series
  4589 // from the _hum_* fields and updates those accordingly. It assumes
  4590 // that the _hum_* fields have already been set up from the "starts
  4591 // humongous" region and we visit the regions in address order.
  4592 void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
  4593                                                      size_t* capacity_bytes,
  4594                                                      size_t* prev_live_bytes,
  4595                                                      size_t* next_live_bytes) {
  4596   assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
  4597   *used_bytes      = get_hum_bytes(&_hum_used_bytes);
  4598   *capacity_bytes  = get_hum_bytes(&_hum_capacity_bytes);
  4599   *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
  4600   *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
  4601 }
  4603 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
  4604   const char* type = "";
  4605   HeapWord* bottom       = r->bottom();
  4606   HeapWord* end          = r->end();
  4607   size_t capacity_bytes  = r->capacity();
  4608   size_t used_bytes      = r->used();
  4609   size_t prev_live_bytes = r->live_bytes();
  4610   size_t next_live_bytes = r->next_live_bytes();
  4611   double gc_eff          = r->gc_efficiency();
  4612   size_t remset_bytes    = r->rem_set()->mem_size();
  4613   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
  4615   if (r->used() == 0) {
  4616     type = "FREE";
  4617   } else if (r->is_survivor()) {
  4618     type = "SURV";
  4619   } else if (r->is_young()) {
  4620     type = "EDEN";
  4621   } else if (r->startsHumongous()) {
  4622     type = "HUMS";
  4624     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
  4625            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
  4626            "they should have been zeroed after the last time we used them");
  4627     // Set up the _hum_* fields.
  4628     _hum_capacity_bytes  = capacity_bytes;
  4629     _hum_used_bytes      = used_bytes;
  4630     _hum_prev_live_bytes = prev_live_bytes;
  4631     _hum_next_live_bytes = next_live_bytes;
  4632     get_hum_bytes(&used_bytes, &capacity_bytes,
  4633                   &prev_live_bytes, &next_live_bytes);
  4634     end = bottom + HeapRegion::GrainWords;
  4635   } else if (r->continuesHumongous()) {
  4636     type = "HUMC";
  4637     get_hum_bytes(&used_bytes, &capacity_bytes,
  4638                   &prev_live_bytes, &next_live_bytes);
  4639     assert(end == bottom + HeapRegion::GrainWords, "invariant");
  4640   } else {
  4641     type = "OLD";
  4642   }
  4644   _total_used_bytes      += used_bytes;
  4645   _total_capacity_bytes  += capacity_bytes;
  4646   _total_prev_live_bytes += prev_live_bytes;
  4647   _total_next_live_bytes += next_live_bytes;
  4648   _total_remset_bytes    += remset_bytes;
  4649   _total_strong_code_roots_bytes += strong_code_roots_bytes;
  4651   // Print a line for this particular region.
  4652   _out->print_cr(G1PPRL_LINE_PREFIX
  4653                  G1PPRL_TYPE_FORMAT
  4654                  G1PPRL_ADDR_BASE_FORMAT
  4655                  G1PPRL_BYTE_FORMAT
  4656                  G1PPRL_BYTE_FORMAT
  4657                  G1PPRL_BYTE_FORMAT
  4658                  G1PPRL_DOUBLE_FORMAT
  4659                  G1PPRL_BYTE_FORMAT
  4660                  G1PPRL_BYTE_FORMAT,
  4661                  type, bottom, end,
  4662                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
  4663                  remset_bytes, strong_code_roots_bytes);
  4665   return false;
  4666 }
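       // Note: for a HeapRegionClosure, returning false means "continue with
       // the next region"; returning true would terminate the heap region
       // iteration early.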
  4668 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
  4669   // add static memory usages to remembered set sizes
  4670   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
  4671   // Print the footer of the output.
  4672   _out->print_cr(G1PPRL_LINE_PREFIX);
  4673   _out->print_cr(G1PPRL_LINE_PREFIX
  4674                  " SUMMARY"
  4675                  G1PPRL_SUM_MB_FORMAT("capacity")
  4676                  G1PPRL_SUM_MB_PERC_FORMAT("used")
  4677                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
  4678                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
  4679                  G1PPRL_SUM_MB_FORMAT("remset")
  4680                  G1PPRL_SUM_MB_FORMAT("code-roots"),
  4681                  bytes_to_mb(_total_capacity_bytes),
  4682                  bytes_to_mb(_total_used_bytes),
  4683                  perc(_total_used_bytes, _total_capacity_bytes),
  4684                  bytes_to_mb(_total_prev_live_bytes),
  4685                  perc(_total_prev_live_bytes, _total_capacity_bytes),
  4686                  bytes_to_mb(_total_next_live_bytes),
  4687                  perc(_total_next_live_bytes, _total_capacity_bytes),
  4688                  bytes_to_mb(_total_remset_bytes),
  4689                  bytes_to_mb(_total_strong_code_roots_bytes));
  4690   _out->cr();
  4691 }
