src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

author      hseigel
date        Thu, 26 Sep 2013 10:25:02 -0400
changeset   5784:190899198332
parent      5694:7944aba7ba41
child       6088:40b8c6bad703
permissions -rw-r--r--

7195622: CheckUnhandledOops has limited usefulness now
Summary: Enable CHECK_UNHANDLED_OOPS in fastdebug builds across all supported platforms.
Reviewed-by: coleenp, hseigel, dholmes, stefank, twisti, ihse, rdurbin
Contributed-by: lois.foltan@oracle.com

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232.)
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
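
// Illustrative usage sketch (an editor's example; the particular lock
// passed is hypothetical, call sites supply whichever Mutex* instances a
// phase actually needs):
//
//   {
//     // Takes the CMS token on behalf of the CMS thread, then the given
//     // lock(s); on scope exit the locks are released first, then the token.
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     ... phase work requiring the token and the lock(s) ...
//   }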

// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (CollectedHeap::use_parallel_gc_threads()) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}

// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
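
// Worked example (assuming the defaults of this vintage: io == -1 so the
// else-branch is taken, MinHeapFreeRatio == 40 and CMSTriggerRatio == 80):
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100 = 0.92,
// i.e. a new cycle is initiated once the generation is about 92% occupied.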

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             (int) ParallelGCThreads,             // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure,                  // closure for liveness info
                             false);                              // next field updates do not need write barrier
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
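
// Worked example with illustrative numbers: if cms_free is 210M bytes and
// expected_promotion is 10M, the remainder is 200M; assuming the default
// CMSIncrementalSafetyFactor of 10, the adjusted free space is
// 200M * 0.9 = 180M, and with a consumption rate of 20M bytes/sec the
// estimate is about 180M / 20M = 9 seconds (the +1.0 in the divisor only
// guards against a zero rate).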

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}
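
// Example of the damping: a requested drop from 60 to 10 is limited to
// largest_delta = MAX2(60/4, 5U) = 15, giving 60 - 15 = 45; a requested
// jump from 60 to 100 is limited to MAX2(60/4, 15U) = 15, giving
// MIN2(60 + 15, 100U) = 75.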

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}
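
// Example: with 2 seconds of scaled duration remaining and
// time_until_cms_gen_full() == 8 seconds, duty_cycle_dbl is
// 100.0 * 2 / 8 = 25, i.e. a (pre-damping) duty cycle of 25%.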

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
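
  // For instance (illustrative numbers, assuming the default
  // CMSSamplingGrain of 16K words): a young gen with a 64M-word
  // max_capacity() yields (64M + 16K) / 16K, i.e. about 4K
  // eden-top sample slots.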

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array  != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure, dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap, but
  // compaction is expected to be a rare event with
  // a heap using cms, so don't do it without seeing the need.
  if (CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
                               prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}
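
// Worked example for the expansion branch (illustrative numbers): with
// used() == 600M and desired_free_percentage == 0.4 (MinHeapFreeRatio 40),
// desired_capacity = 600M / (1 - 0.4) = 1000M; if capacity() is 800M the
// generation expands by 200M (or by MinHeapDeltaBytes if that is larger).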

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  (All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                           PTR_FORMAT "," PTR_FORMAT
                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                           _icms_start_limit, _icms_stop_limit,
                           percent_of_space(eden, _icms_start_limit),
                           percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}
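
// Example (illustrative numbers): with 100M words free in eden and a duty
// cycle of 20%, duty_cycle_words == 20M and
// offset_words == (100M - 20M) / 2 == 40M, so the 20M-word icms window is
// centered in the free space: _icms_start_limit == top() + 40M and
// _icms_stop_limit == end() - 40M.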
  1202 // Any changes here should try to maintain the invariant
  1203 // that if this method is called with _icms_start_limit
  1204 // and _icms_stop_limit both NULL, then it should return NULL
  1205 // and not notify the icms thread.
  1206 HeapWord*
  1207 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
  1208                                        size_t word_size)
  1210   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  1211   // nop.
  1212   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
  1213     if (top <= _icms_start_limit) {
  1214       if (CMSTraceIncrementalMode) {
  1215         space->print_on(gclog_or_tty);
  1216         gclog_or_tty->stamp();
  1217         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
  1218                                ", new limit=" PTR_FORMAT
  1219                                " (" SIZE_FORMAT "%%)",
  1220                                top, _icms_stop_limit,
  1221                                percent_of_space(space, _icms_stop_limit));
  1223       ConcurrentMarkSweepThread::start_icms();
  1224       assert(top < _icms_stop_limit, "Tautology");
  1225       if (word_size < pointer_delta(_icms_stop_limit, top)) {
  1226         return _icms_stop_limit;
  1229       // The allocation will cross both the _start and _stop limits, so do the
  1230       // stop notification also and return end().
  1231       if (CMSTraceIncrementalMode) {
  1232         space->print_on(gclog_or_tty);
  1233         gclog_or_tty->stamp();
  1234         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
  1235                                ", new limit=" PTR_FORMAT
  1236                                " (" SIZE_FORMAT "%%)",
  1237                                top, space->end(),
  1238                                percent_of_space(space, space->end()));
  1239       }
  1240       ConcurrentMarkSweepThread::stop_icms();
  1241       return space->end();
  1242     }
  1244     if (top <= _icms_stop_limit) {
  1245       if (CMSTraceIncrementalMode) {
  1246         space->print_on(gclog_or_tty);
  1247         gclog_or_tty->stamp();
  1248         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
  1249                                ", new limit=" PTR_FORMAT
  1250                                " (" SIZE_FORMAT "%%)",
  1251                                top, space->end(),
  1252                                percent_of_space(space, space->end()));
  1253       }
  1254       ConcurrentMarkSweepThread::stop_icms();
  1255       return space->end();
  1256     }
  1258     if (CMSTraceIncrementalMode) {
  1259       space->print_on(gclog_or_tty);
  1260       gclog_or_tty->stamp();
  1261       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
  1262                              ", new limit=" PTR_FORMAT,
  1263                              top, NULL);
  1264     }
  1265   }
  1267   return NULL;
  1268 }
  1270 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  1271   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1272   // allocate, copy and if necessary update promoinfo --
  1273   // delegate to underlying space.
  1274   assert_lock_strong(freelistLock());
  1276 #ifndef PRODUCT
  1277   if (Universe::heap()->promotion_should_fail()) {
  1278     return NULL;
  1279   }
  1280 #endif  // #ifndef PRODUCT
  1282   oop res = _cmsSpace->promote(obj, obj_size);
  1283   if (res == NULL) {
  1284     // expand and retry
  1285     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
  1286     expand(s*HeapWordSize, MinHeapDeltaBytes,
  1287       CMSExpansionCause::_satisfy_promotion);
  1288     // Since there's currently no next generation, we don't try to promote
  1289     // into a more senior generation.
  1290     assert(next_gen() == NULL, "assumption, based upon which no attempt "
  1291                                "is made to pass on a possibly failing "
  1292                                "promotion to next generation");
  1293     res = _cmsSpace->promote(obj, obj_size);
  1294   }
  1295   if (res != NULL) {
  1296     // See comment in allocate() about when objects should
  1297     // be allocated live.
  1298     assert(obj->is_oop(), "Will dereference klass pointer below");
  1299     collector()->promoted(false,           // Not parallel
  1300                           (HeapWord*)res, obj->is_objArray(), obj_size);
  1301     // promotion counters
  1302     NOT_PRODUCT(
  1303       _numObjectsPromoted++;
  1304       _numWordsPromoted +=
  1305         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
  1306     )
  1307   }
  1308   return res;
  1309 }
  1312 HeapWord*
  1313 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
  1314                                              HeapWord* top,
  1315                                              size_t word_sz)
  1316 {
  1317   return collector()->allocation_limit_reached(space, top, word_sz);
  1318 }
  1320 // IMPORTANT: Notes on object size recognition in CMS.
  1321 // ---------------------------------------------------
  1322 // A block of storage in the CMS generation is always in
  1323 // one of three states: a free block (FREE), an allocated
  1324 // object (OBJECT) whose size() method reports the correct size,
  1325 // or an intermediate state (TRANSIENT) in which its size cannot
  1326 // be accurately determined.
  1327 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
  1328 // -----------------------------------------------------
  1329 // FREE:      klass_word & 1 == 1; mark_word holds block size
  1330 //
  1331 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
  1332 //            obj->size() computes correct size
  1333 //
  1334 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
  1335 //
  1336 // STATE IDENTIFICATION: (64 bit+COOPS)
  1337 // ------------------------------------
  1338 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
  1339 //
  1340 // OBJECT:    klass_word installed; klass_word != 0;
  1341 //            obj->size() computes correct size
  1342 //
  1343 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
  1344 //
  1345 //
  1346 // STATE TRANSITION DIAGRAM
  1347 //
  1348 //        mut / parnew                     mut  /  parnew
  1349 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
  1350 //  ^                                                                   |
  1351 //  |------------------------ DEAD <------------------------------------|
  1352 //         sweep                            mut
  1353 //
  1354 // While a block is in TRANSIENT state its size cannot be determined
  1355 // so readers will either need to come back later or stall until
  1356 // the size can be determined. Note that for the case of direct
  1357 // allocation, P-bits, when available, may be used to determine the
  1358 // size of an object that may not yet have been initialized.
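       //
       // As an editorial sketch only (this is not code the collector uses),
       // the 32-bit / non-COOPS rules above amount to a classifier of the form:
       //
       //   if (klass_word & 1)       -> FREE      (mark_word holds the block size)
       //   else if (klass_word == 0) -> TRANSIENT (size indeterminate; retry or stall)
       //   else                      -> OBJECT    (obj->size() is reliable)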
  1360 // Things to support parallel young-gen collection.
  1361 oop
  1362 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
  1363                                            oop old, markOop m,
  1364                                            size_t word_sz) {
  1365 #ifndef PRODUCT
  1366   if (Universe::heap()->promotion_should_fail()) {
  1367     return NULL;
  1368   }
  1369 #endif  // #ifndef PRODUCT
  1371   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  1372   PromotionInfo* promoInfo = &ps->promo;
  1373   // if we are tracking promotions, then first ensure space for
  1374   // promotion (including spooling space for saving header if necessary).
  1375   // then allocate and copy, then track promoted info if needed.
  1376   // When tracking (see PromotionInfo::track()), the mark word may
  1377   // be displaced and in this case restoration of the mark word
  1378   // occurs in the (oop_since_save_marks_)iterate phase.
  1379   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
  1380     // Out of space for allocating spooling buffers;
  1381     // try expanding and allocating spooling buffers.
  1382     if (!expand_and_ensure_spooling_space(promoInfo)) {
  1383       return NULL;
  1384     }
  1385   }
  1386   assert(promoInfo->has_spooling_space(), "Control point invariant");
  1387   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  1388   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  1389   if (obj_ptr == NULL) {
  1390      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
  1391      if (obj_ptr == NULL) {
  1392        return NULL;
  1393      }
  1394   }
  1395   oop obj = oop(obj_ptr);
  1396   OrderAccess::storestore();
  1397   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  1398   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  1399   // IMPORTANT: See note on object initialization for CMS above.
  1400   // Otherwise, copy the object.  Here we must be careful to insert the
  1401   // klass pointer last, since this marks the block as an allocated object.
  1402   // Except with compressed oops it's the mark word.
  1403   HeapWord* old_ptr = (HeapWord*)old;
  1404   // Restore the mark word copied above.
  1405   obj->set_mark(m);
  1406   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  1407   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  1408   OrderAccess::storestore();
  1410   if (UseCompressedClassPointers) {
  1411     // Copy gap missed by (aligned) header size calculation below
  1412     obj->set_klass_gap(old->klass_gap());
  1413   }
  1414   if (word_sz > (size_t)oopDesc::header_size()) {
  1415     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
  1416                                  obj_ptr + oopDesc::header_size(),
  1417                                  word_sz - oopDesc::header_size());
  1418   }
  1420   // Now we can track the promoted object, if necessary.  We take care
  1421   // to delay the transition from uninitialized to full object
  1422   // (i.e., insertion of klass pointer) until after, so that it
  1423   // atomically becomes a promoted object.
  1424   if (promoInfo->tracking()) {
  1425     promoInfo->track((PromotedObject*)obj, old->klass());
  1426   }
  1427   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  1428   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  1429   assert(old->is_oop(), "Will use and dereference old klass ptr below");
  1431   // Finally, install the klass pointer (this should be volatile).
  1432   OrderAccess::storestore();
  1433   obj->set_klass(old->klass());
  1434   // We should now be able to calculate the right size for this object
  1435   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
  1437   collector()->promoted(true,          // parallel
  1438                         obj_ptr, old->is_objArray(), word_sz);
  1440   NOT_PRODUCT(
  1441     Atomic::inc_ptr(&_numObjectsPromoted);
  1442     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  1443   )
  1445   return obj;
  1446 }
  1448 void
  1449 ConcurrentMarkSweepGeneration::
  1450 par_promote_alloc_undo(int thread_num,
  1451                        HeapWord* obj, size_t word_sz) {
  1452   // CMS does not support promotion undo.
  1453   ShouldNotReachHere();
  1454 }
  1456 void
  1457 ConcurrentMarkSweepGeneration::
  1458 par_promote_alloc_done(int thread_num) {
  1459   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  1460   ps->lab.retire(thread_num);
  1461 }
  1463 void
  1464 ConcurrentMarkSweepGeneration::
  1465 par_oop_since_save_marks_iterate_done(int thread_num) {
  1466   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  1467   ParScanWithoutBarrierClosure* dummy_cl = NULL;
  1468   ps->promo.promoted_oops_iterate_nv(dummy_cl);
  1469 }
  1471 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
  1472                                                    size_t size,
  1473                                                    bool   tlab)
  1474 {
  1475   // We allow a STW collection only if a full
  1476   // collection was requested.
  1477   return full || should_allocate(size, tlab); // FIX ME !!!
  1478   // This and promotion failure handling are connected at the
  1479   // hip and should be fixed by untying them.
  1480 }
  1482 bool CMSCollector::shouldConcurrentCollect() {
  1483   if (_full_gc_requested) {
  1484     if (Verbose && PrintGCDetails) {
  1485       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
  1486                              "gc request (or gc_locker)");
  1487     }
  1488     return true;
  1489   }
  1491   // For debugging purposes, change the type of collection.
  1492   // If the rotation is not on the concurrent collection
  1493   // type, don't start a concurrent collection.
  1494   NOT_PRODUCT(
  1495     if (RotateCMSCollectionTypes &&
  1496         (_cmsGen->debug_collection_type() !=
  1497           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
  1498       assert(_cmsGen->debug_collection_type() !=
  1499         ConcurrentMarkSweepGeneration::Unknown_collection_type,
  1500         "Bad cms collection type");
  1501       return false;
  1502     }
  1503   )
  1505   FreelistLocker x(this);
  1506   // ------------------------------------------------------------------
  1507   // Print out lots of information which affects the initiation of
  1508   // a collection.
  1509   if (PrintCMSInitiationStatistics && stats().valid()) {
  1510     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
  1511     gclog_or_tty->stamp();
  1512     gclog_or_tty->print_cr("");
  1513     stats().print_on(gclog_or_tty);
  1514     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
  1515       stats().time_until_cms_gen_full());
  1516     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
  1517     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
  1518                            _cmsGen->contiguous_available());
  1519     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
  1520     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
  1521     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
  1522     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
  1523     gclog_or_tty->print_cr("metadata initialized %d",
  1524       MetaspaceGC::should_concurrent_collect());
  1525   }
  1526   // ------------------------------------------------------------------
  1528   // If the estimated time to complete a cms collection (cms_duration())
  1529   // is less than the estimated time remaining until the cms generation
  1530   // is full, start a collection.
  1531   if (!UseCMSInitiatingOccupancyOnly) {
  1532     if (stats().valid()) {
  1533       if (stats().time_until_cms_start() == 0.0) {
  1534         return true;
  1535       }
  1536     } else {
  1537       // We want to conservatively collect somewhat early in order
  1538       // to try and "bootstrap" our CMS/promotion statistics;
  1539       // this branch will not fire after the first successful CMS
  1540       // collection because the stats should then be valid.
  1541       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
  1542         if (Verbose && PrintGCDetails) {
  1543           gclog_or_tty->print_cr(
  1544             " CMSCollector: collect for bootstrapping statistics:"
  1545             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
  1546             _bootstrap_occupancy);
  1547         }
  1548         return true;
  1549       }
  1550     }
  1551   }
  1553   // Otherwise, we start a collection cycle if the
  1554   // old gen wants a collection cycle started. Each may use
  1555   // an appropriate criterion for making this decision.
  1556   // XXX We need to make sure that the gen expansion
  1557   // criterion dovetails well with this. XXX NEED TO FIX THIS
  1558   if (_cmsGen->should_concurrent_collect()) {
  1559     if (Verbose && PrintGCDetails) {
  1560       gclog_or_tty->print_cr("CMS old gen initiated");
  1561     }
  1562     return true;
  1563   }
  1565   // We start a collection if we believe an incremental collection may fail;
  1566   // this is not likely to be productive in practice because it's probably too
  1567   // late anyway.
  1568   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1569   assert(gch->collector_policy()->is_two_generation_policy(),
  1570          "You may want to check the correctness of the following");
  1571   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
  1572     if (Verbose && PrintGCDetails) {
  1573       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
  1574     }
  1575     return true;
  1576   }
  1578   if (MetaspaceGC::should_concurrent_collect()) {
  1579     if (Verbose && PrintGCDetails) {
  1580       gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
  1581     }
  1582     return true;
  1583   }
  1585   return false;
  1586 }
  1588 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
  1590 // Clear _expansion_cause fields of constituent generations
  1591 void CMSCollector::clear_expansion_cause() {
  1592   _cmsGen->clear_expansion_cause();
  1593 }
  1595 // We should be conservative in starting a collection cycle.  To
  1596 // start too eagerly runs the risk of collecting too often in the
  1597 // extreme.  To collect too rarely falls back on full collections,
  1598 // which works, even if not optimal in terms of concurrent work.
  1599 // As a workaround for collecting too eagerly, use the flag
  1600 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
  1601 // giving the user an easily understandable way of controlling the
  1602 // collections.
  1603 // We want to start a new collection cycle if any of the following
  1604 // conditions hold:
  1605 // . our current occupancy exceeds the configured initiating occupancy
  1606 //   for this generation, or
  1607 // . we recently needed to expand this space and have not, since that
  1608 //   expansion, done a collection of this generation, or
  1609 // . the underlying space believes that it may be a good idea to initiate
  1610 //   a concurrent collection (this may be based on criteria such as the
  1611 //   following: the space uses linear allocation and linear allocation is
  1612 //   going to fail, or there is believed to be excessive fragmentation in
  1613 //   the generation, etc... or ...
  1614 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
  1615 //   the case of the old generation; see CR 6543076):
  1616 //   we may be approaching a point at which allocation requests may fail because
  1617 //   we will be out of sufficient free space given allocation rate estimates.]
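       //
       // As a tuning illustration (an editorial example, not advice given by
       // this code): a deployment that wants initiation to depend only on the
       // occupancy test below typically runs with flags such as
       //   -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly
       // which disables the statistics-driven criteria described above.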
  1618 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
  1620   assert_lock_strong(freelistLock());
  1621   if (occupancy() > initiating_occupancy()) {
  1622     if (PrintGCDetails && Verbose) {
  1623       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
  1624         short_name(), occupancy(), initiating_occupancy());
  1625     }
  1626     return true;
  1627   }
  1628   if (UseCMSInitiatingOccupancyOnly) {
  1629     return false;
  1630   }
  1631   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
  1632     if (PrintGCDetails && Verbose) {
  1633       gclog_or_tty->print(" %s: collect because expanded for allocation ",
  1634         short_name());
  1635     }
  1636     return true;
  1637   }
  1638   if (_cmsSpace->should_concurrent_collect()) {
  1639     if (PrintGCDetails && Verbose) {
  1640       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
  1641         short_name());
  1642     }
  1643     return true;
  1644   }
  1645   return false;
  1646 }
  1648 void ConcurrentMarkSweepGeneration::collect(bool   full,
  1649                                             bool   clear_all_soft_refs,
  1650                                             size_t size,
  1651                                             bool   tlab)
  1652 {
  1653   collector()->collect(full, clear_all_soft_refs, size, tlab);
  1654 }
  1656 void CMSCollector::collect(bool   full,
  1657                            bool   clear_all_soft_refs,
  1658                            size_t size,
  1659                            bool   tlab)
  1660 {
  1661   if (!UseCMSCollectionPassing && _collectorState > Idling) {
  1662     // For debugging purposes skip the collection if the state
  1663     // is not currently idle
  1664     if (TraceCMSState) {
  1665       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
  1666         Thread::current(), full, _collectorState);
  1667     }
  1668     return;
  1669   }
  1671   // The following "if" branch is present for defensive reasons.
  1672   // In the current uses of this interface, it can be replaced with:
  1673   // assert(!GC_locker.is_active(), "Can't be called otherwise");
  1674   // But I am not placing that assert here to allow future
  1675   // generality in invoking this interface.
  1676   if (GC_locker::is_active()) {
  1677     // A consistency test for GC_locker
  1678     assert(GC_locker::needs_gc(), "Should have been set already");
  1679     // Skip this foreground collection, instead
  1680     // expanding the heap if necessary.
  1681     // Need the free list locks for the call to free() in compute_new_size()
  1682     compute_new_size();
  1683     return;
  1684   }
  1685   acquire_control_and_collect(full, clear_all_soft_refs);
  1686   _full_gcs_since_conc_gc++;
  1687 }
  1689 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
  1690   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1691   unsigned int gc_count = gch->total_full_collections();
  1692   if (gc_count == full_gc_count) {
  1693     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
  1694     _full_gc_requested = true;
  1695     _full_gc_cause = cause;
  1696     CGC_lock->notify();   // nudge CMS thread
  1697   } else {
  1698     assert(gc_count > full_gc_count, "Error: causal loop");
  1699   }
  1700 }
  1702 bool CMSCollector::is_external_interruption() {
  1703   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
  1704   return GCCause::is_user_requested_gc(cause) ||
  1705          GCCause::is_serviceability_requested_gc(cause);
  1706 }
  1708 void CMSCollector::report_concurrent_mode_interruption() {
  1709   if (is_external_interruption()) {
  1710     if (PrintGCDetails) {
  1711       gclog_or_tty->print(" (concurrent mode interrupted)");
  1712     }
  1713   } else {
  1714     if (PrintGCDetails) {
  1715       gclog_or_tty->print(" (concurrent mode failure)");
  1717     _gc_tracer_cm->report_concurrent_mode_failure();
  1718   }
  1719 }
  1722 // The foreground and background collectors need to coordinate in order
  1723 // to make sure that they do not mutually interfere with CMS collections.
  1724 // When a background collection is active,
  1725 // the foreground collector may need to take over (preempt) and
  1726 // synchronously complete an ongoing collection. Depending on the
  1727 // frequency of the background collections and the heap usage
  1728 // of the application, this preemption can be rare or frequent.
  1729 // There are only certain
  1730 // points in the background collection at which the "collection-baton"
  1731 // can be passed to the foreground collector.
  1732 //
  1733 // The foreground collector will wait for the baton before
  1734 // starting any part of the collection.  The foreground collector
  1735 // will only wait at one location.
  1736 //
  1737 // The background collector will yield the baton before starting a new
  1738 // phase of the collection (e.g., before initial marking, marking from roots,
  1739 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
  1740 // of the loop which switches the phases. The background collector does some
  1741 // of the phases (initial mark, final re-mark) with the world stopped.
  1742 // Because of locking involved in stopping the world,
  1743 // the foreground collector should not block waiting for the background
  1744 // collector when it is doing a stop-the-world phase.  The background
  1745 // collector will yield the baton at an additional point just before
  1746 // it enters a stop-the-world phase.  Once the world is stopped, the
  1747 // background collector checks the phase of the collection.  If the
  1748 // phase has not changed, it proceeds with the collection.  If the
  1749 // phase has changed, it skips that phase of the collection.  See
  1750 // the comments on the use of the Heap_lock in collect_in_background().
  1751 //
  1752 // Variables used in baton passing.
  1753 //   _foregroundGCIsActive - Set to true by the foreground collector when
  1754 //      it wants the baton.  The foreground clears it when it has finished
  1755 //      the collection.
  1756 //   _foregroundGCShouldWait - Set to true by the background collector
  1757 //      when it is running.  The foreground collector waits while
  1758 //      _foregroundGCShouldWait is true.
  1759 //  CGC_lock - monitor used to protect access to the above variables
  1760 //      and to notify the foreground and background collectors.
  1761 //  _collectorState - current state of the CMS collection.
  1762 //
  1763 // The foreground collector
  1764 //   acquires the CGC_lock
  1765 //   sets _foregroundGCIsActive
  1766 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
  1767 //     various locks acquired in preparation for the collection
  1768 //     are released so as not to block the background collector
  1769 //     that is in the midst of a collection
  1770 //   proceeds with the collection
  1771 //   clears _foregroundGCIsActive
  1772 //   returns
  1773 //
  1774 // The background collector in a loop iterating on the phases of the
  1775 //      collection
  1776 //   acquires the CGC_lock
  1777 //   sets _foregroundGCShouldWait
  1778 //   if _foregroundGCIsActive is set
  1779 //     clears _foregroundGCShouldWait, notifies CGC_lock
  1780 //     waits on CGC_lock for _foregroundGCIsActive to become false
  1781 //     and exits the loop.
  1782 //   otherwise
  1783 //     proceed with that phase of the collection
  1784 //     if the phase is a stop-the-world phase,
  1785 //       yield the baton once more just before enqueueing
  1786 //       the stop-world CMS operation (executed by the VM thread).
  1787 //   returns after all phases of the collection are done
  1788 //
  1790 void CMSCollector::acquire_control_and_collect(bool full,
  1791         bool clear_all_soft_refs) {
  1792   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  1793   assert(!Thread::current()->is_ConcurrentGC_thread(),
  1794          "shouldn't try to acquire control from self!");
  1796   // Start the protocol for acquiring control of the
  1797   // collection from the background collector (aka CMS thread).
  1798   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  1799          "VM thread should have CMS token");
  1800   // Remember the possibly interrupted state of an ongoing
  1801   // concurrent collection
  1802   CollectorState first_state = _collectorState;
  1804   // Signal to a possibly ongoing concurrent collection that
  1805   // we want to do a foreground collection.
  1806   _foregroundGCIsActive = true;
  1808   // Disable incremental mode during a foreground collection.
  1809   ICMSDisabler icms_disabler;
  1811   // release locks and wait for a notify from the background collector
  1812   // releasing the locks is only necessary for phases which
  1813   // perform yields to improve the granularity of the collection.
  1814   assert_lock_strong(bitMapLock());
  1815   // We need to lock the Free list lock for the space that we are
  1816   // currently collecting.
  1817   assert(haveFreelistLocks(), "Must be holding free list locks");
  1818   bitMapLock()->unlock();
  1819   releaseFreelistLocks();
  1820   {
  1821     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  1822     if (_foregroundGCShouldWait) {
  1823       // We are going to be waiting for action for the CMS thread;
  1824       // it had better not be gone (for instance at shutdown)!
  1825       assert(ConcurrentMarkSweepThread::cmst() != NULL,
  1826              "CMS thread must be running");
  1827       // Wait here until the background collector gives us the go-ahead
  1828       ConcurrentMarkSweepThread::clear_CMS_flag(
  1829         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
  1830       // Get a possibly blocked CMS thread going:
  1831       //   Note that we set _foregroundGCIsActive true above,
  1832       //   without protection of the CGC_lock.
  1833       CGC_lock->notify();
  1834       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
  1835              "Possible deadlock");
  1836       while (_foregroundGCShouldWait) {
  1837         // wait for notification
  1838         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  1839         // Possibility of delay/starvation here, since CMS token does
  1840         // not know to give priority to VM thread? Actually, I think
  1841         // there wouldn't be any delay/starvation, but the proof of
  1842         // that "fact" (?) appears non-trivial. XXX 20011219YSR
  1844       ConcurrentMarkSweepThread::set_CMS_flag(
  1845         ConcurrentMarkSweepThread::CMS_vm_has_token);
  1846     }
  1847   }
  1848   // The CMS_token is already held.  Get back the other locks.
  1849   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  1850          "VM thread should have CMS token");
  1851   getFreelistLocks();
  1852   bitMapLock()->lock_without_safepoint_check();
  1853   if (TraceCMSState) {
  1854     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
  1855       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
  1856     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
  1859   // Check if we need to do a compaction, or if not, whether
  1860   // we need to start the mark-sweep from scratch.
  1861   bool should_compact    = false;
  1862   bool should_start_over = false;
  1863   decide_foreground_collection_type(clear_all_soft_refs,
  1864     &should_compact, &should_start_over);
  1866 NOT_PRODUCT(
  1867   if (RotateCMSCollectionTypes) {
  1868     if (_cmsGen->debug_collection_type() ==
  1869         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
  1870       should_compact = true;
  1871     } else if (_cmsGen->debug_collection_type() ==
  1872                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
  1873       should_compact = false;
  1874     }
  1875   }
  1876 )
  1878   if (first_state > Idling) {
  1879     report_concurrent_mode_interruption();
  1880   }
  1882   set_did_compact(should_compact);
  1883   if (should_compact) {
  1884     // If the collection is being acquired from the background
  1885     // collector, there may be references on the discovered
  1886     // references lists that have NULL referents (being those
  1887     // that were concurrently cleared by a mutator) or
  1888     // that are no longer active (having been enqueued concurrently
  1889     // by the mutator).
  1890     // Scrub the list of those references because Mark-Sweep-Compact
  1891     // code assumes referents are not NULL and that all discovered
  1892     // Reference objects are active.
  1893     ref_processor()->clean_up_discovered_references();
  1895     if (first_state > Idling) {
  1896       save_heap_summary();
  1897     }
  1899     do_compaction_work(clear_all_soft_refs);
  1901     // Has the GC time limit been exceeded?
  1902     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
  1903     size_t max_eden_size = young_gen->max_capacity() -
  1904                            young_gen->to()->capacity() -
  1905                            young_gen->from()->capacity();
  1906     GenCollectedHeap* gch = GenCollectedHeap::heap();
  1907     GCCause::Cause gc_cause = gch->gc_cause();
  1908     size_policy()->check_gc_overhead_limit(_young_gen->used(),
  1909                                            young_gen->eden()->used(),
  1910                                            _cmsGen->max_capacity(),
  1911                                            max_eden_size,
  1912                                            full,
  1913                                            gc_cause,
  1914                                            gch->collector_policy());
  1915   } else {
  1916     do_mark_sweep_work(clear_all_soft_refs, first_state,
  1917       should_start_over);
  1918   }
  1919   // Reset the expansion cause, now that we just completed
  1920   // a collection cycle.
  1921   clear_expansion_cause();
  1922   _foregroundGCIsActive = false;
  1923   return;
  1924 }
  1926 // Resize the tenured generation
  1927 // after obtaining the free list locks for the
  1928 // two generations.
  1929 void CMSCollector::compute_new_size() {
  1930   assert_locked_or_safepoint(Heap_lock);
  1931   FreelistLocker z(this);
  1932   MetaspaceGC::compute_new_size();
  1933   _cmsGen->compute_new_size_free_list();
  1934 }
  1936 // A work method used by foreground collection to determine
  1937 // what type of collection (compacting or not, continuing or fresh)
  1938 // it should do.
  1939 // NOTE: the intent is to make UseCMSCompactAtFullCollection
  1940 // and CMSCompactWhenClearAllSoftRefs the default in the future
  1941 // and do away with the flags after a suitable period.
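       //
       // As an editorial illustration of the predicate computed below: with
       // -XX:+UseCMSCompactAtFullCollection and -XX:CMSFullGCsBeforeCompaction=0,
       // every foreground full gc compacts; roughly speaking, a larger threshold
       // lets that many non-compacting mark-sweeps go by before a compaction
       // is forced.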
  1942 void CMSCollector::decide_foreground_collection_type(
  1943   bool clear_all_soft_refs, bool* should_compact,
  1944   bool* should_start_over) {
  1945   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
  1946   // flag is set, and we have either requested a System.gc() or
  1947   // the number of full gc's since the last concurrent cycle
  1948   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
  1949   // or if an incremental collection has failed
  1950   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1951   assert(gch->collector_policy()->is_two_generation_policy(),
  1952          "You may want to check the correctness of the following");
  1953   // Inform cms gen if this was due to partial collection failing.
  1954   // The CMS gen may use this fact to determine its expansion policy.
  1955   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
  1956     assert(!_cmsGen->incremental_collection_failed(),
  1957            "Should have been noticed, reacted to and cleared");
  1958     _cmsGen->set_incremental_collection_failed();
  1959   }
  1960   *should_compact =
  1961     UseCMSCompactAtFullCollection &&
  1962     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
  1963      GCCause::is_user_requested_gc(gch->gc_cause()) ||
  1964      gch->incremental_collection_will_fail(true /* consult_young */));
  1965   *should_start_over = false;
  1966   if (clear_all_soft_refs && !*should_compact) {
  1967     // We are about to do a last ditch collection attempt
  1968     // so it would normally make sense to do a compaction
  1969     // to reclaim as much space as possible.
  1970     if (CMSCompactWhenClearAllSoftRefs) {
  1971       // Default: The rationale is that in this case either
  1972       // we are past the final marking phase, in which case
  1973       // we'd have to start over, or so little has been done
  1974       // that there's little point in saving that work. Compaction
  1975       // appears to be the sensible choice in either case.
  1976       *should_compact = true;
  1977     } else {
  1978       // We have been asked to clear all soft refs, but not to
  1979       // compact. Make sure that we aren't past the final checkpoint
  1980       // phase, for that is where we process soft refs. If we are already
  1981       // past that phase, we'll need to redo the refs discovery phase and
  1982       // if necessary clear soft refs that weren't previously
  1983       // cleared. We do so by remembering the phase in which
  1984       // we came in, and if we are past the refs processing
  1985       // phase, we'll choose to just redo the mark-sweep
  1986       // collection from scratch.
  1987       if (_collectorState > FinalMarking) {
  1988         // We are past the refs processing phase;
  1989         // start over and do a fresh synchronous CMS cycle
  1990         _collectorState = Resetting; // skip to reset to start new cycle
  1991         reset(false /* == !asynch */);
  1992         *should_start_over = true;
  1993       } // else we can continue a possibly ongoing current cycle
  1994     }
  1995   }
  1996 }
  1998 // A work method used by the foreground collector to do
  1999 // a mark-sweep-compact.
  2000 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  2001   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2003   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  2004   gc_timer->register_gc_start(os::elapsed_counter());
  2006   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  2007   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  2009   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
  2010   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
  2011     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
  2012       "collections passed to foreground collector", _full_gcs_since_conc_gc);
  2015   // Sample collection interval time and reset for collection pause.
  2016   if (UseAdaptiveSizePolicy) {
  2017     size_policy()->msc_collection_begin();
  2018   }
  2020   // Temporarily widen the span of the weak reference processing to
  2021   // the entire heap.
  2022   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  2023   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  2024   // Temporarily, clear the "is_alive_non_header" field of the
  2025   // reference processor.
  2026   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
  2027   // Temporarily make reference _processing_ single threaded (non-MT).
  2028   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
  2029   // Temporarily make refs discovery atomic
  2030   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
  2031   // Temporarily make reference _discovery_ single threaded (non-MT)
  2032   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
  2034   ref_processor()->set_enqueuing_is_done(false);
  2035   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
  2036   ref_processor()->setup_policy(clear_all_soft_refs);
  2037   // If an asynchronous collection finishes, the _modUnionTable is
  2038   // all clear.  If we are taking over the collection from an asynchronous
  2039   // collection, clear the _modUnionTable.
  2040   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
  2041     "_modUnionTable should be clear if the baton was not passed");
  2042   _modUnionTable.clear_all();
  2043   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
  2044     "mod union for klasses should be clear if the baton was passed");
  2045   _ct->klass_rem_set()->clear_mod_union();
  2047   // We must adjust the allocation statistics being maintained
  2048   // in the free list space. We do so by reading and clearing
  2049   // the sweep timer and updating the block flux rate estimates below.
  2050   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
  2051   if (_inter_sweep_timer.is_active()) {
  2052     _inter_sweep_timer.stop();
  2053     // Note that we do not use this sample to update the _inter_sweep_estimate.
  2054     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
  2055                                             _inter_sweep_estimate.padded_average(),
  2056                                             _intra_sweep_estimate.padded_average());
  2057   }
  2059   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
  2060     ref_processor(), clear_all_soft_refs);
  2061   #ifdef ASSERT
  2062     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
  2063     size_t free_size = cms_space->free();
  2064     assert(free_size ==
  2065            pointer_delta(cms_space->end(), cms_space->compaction_top())
  2066            * HeapWordSize,
  2067       "All the free space should be compacted into one chunk at top");
  2068     assert(cms_space->dictionary()->total_chunk_size(
  2069                                       debug_only(cms_space->freelistLock())) == 0 ||
  2070            cms_space->totalSizeInIndexedFreeLists() == 0,
  2071       "All the free space should be in a single chunk");
  2072     size_t num = cms_space->totalCount();
  2073     assert((free_size == 0 && num == 0) ||
  2074            (free_size > 0  && (num == 1 || num == 2)),
  2075          "There should be at most 2 free chunks after compaction");
  2076   #endif // ASSERT
  2077   _collectorState = Resetting;
  2078   assert(_restart_addr == NULL,
  2079          "Should have been NULL'd before baton was passed");
  2080   reset(false /* == !asynch */);
  2081   _cmsGen->reset_after_compaction();
  2082   _concurrent_cycles_since_last_unload = 0;
  2084   // Clear any data recorded in the PLAB chunk arrays.
  2085   if (_survivor_plab_array != NULL) {
  2086     reset_survivor_plab_arrays();
  2087   }
  2089   // Adjust the per-size allocation stats for the next epoch.
  2090   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
  2091   // Restart the "inter sweep timer" for the next epoch.
  2092   _inter_sweep_timer.reset();
  2093   _inter_sweep_timer.start();
  2095   // Sample collection pause time and reset for collection interval.
  2096   if (UseAdaptiveSizePolicy) {
  2097     size_policy()->msc_collection_end(gch->gc_cause());
  2098   }
  2100   gc_timer->register_gc_end(os::elapsed_counter());
  2102   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  2104   // For a mark-sweep-compact, compute_new_size() will be called
  2105   // in the heap's do_collection() method.
  2106 }
  2108 // A work method used by the foreground collector to do
  2109 // a mark-sweep, after taking over from a possibly on-going
  2110 // concurrent mark-sweep collection.
  2111 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
  2112   CollectorState first_state, bool should_start_over) {
  2113   if (PrintGC && Verbose) {
  2114     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
  2115       "collector with count %d",
  2116       _full_gcs_since_conc_gc);
  2117   }
  2118   switch (_collectorState) {
  2119     case Idling:
  2120       if (first_state == Idling || should_start_over) {
  2121         // The background GC was not active, or should be
  2122         // restarted from scratch; start the cycle.
  2123         _collectorState = InitialMarking;
  2124       }
  2125       // If first_state was not Idling, then a background GC
  2126       // was in progress and has now finished.  No need to do it
  2127       // again.  Leave the state as Idling.
  2128       break;
  2129     case Precleaning:
  2130       // In the foreground case don't do the precleaning since
  2131       // it is not done concurrently and there is extra work
  2132       // required.
  2133       _collectorState = FinalMarking;
  2134   }
  2135   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
  2137   // For a mark-sweep, compute_new_size() will be called
  2138   // in the heap's do_collection() method.
  2139 }
  2142 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
  2143   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
  2144   EdenSpace* eden_space = dng->eden();
  2145   ContiguousSpace* from_space = dng->from();
  2146   ContiguousSpace* to_space   = dng->to();
  2147   // Eden
  2148   if (_eden_chunk_array != NULL) {
  2149     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
  2150                            eden_space->bottom(), eden_space->top(),
  2151                            eden_space->end(), eden_space->capacity());
  2152     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
  2153                            "_eden_chunk_capacity=" SIZE_FORMAT,
  2154                            _eden_chunk_index, _eden_chunk_capacity);
  2155     for (size_t i = 0; i < _eden_chunk_index; i++) {
  2156       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
  2157                              i, _eden_chunk_array[i]);
  2158     }
  2159   }
  2160   // Survivor
  2161   if (_survivor_chunk_array != NULL) {
  2162     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
  2163                            from_space->bottom(), from_space->top(),
  2164                            from_space->end(), from_space->capacity());
  2165     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
  2166                            "_survivor_chunk_capacity=" SIZE_FORMAT,
  2167                            _survivor_chunk_index, _survivor_chunk_capacity);
  2168     for (size_t i = 0; i < _survivor_chunk_index; i++) {
  2169       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
  2170                              i, _survivor_chunk_array[i]);
  2171     }
  2172   }
  2173 }
  2175 void CMSCollector::getFreelistLocks() const {
  2176   // Get locks for all free lists in all generations that this
  2177   // collector is responsible for
  2178   _cmsGen->freelistLock()->lock_without_safepoint_check();
  2179 }
  2181 void CMSCollector::releaseFreelistLocks() const {
  2182   // Release locks for all free lists in all generations that this
  2183   // collector is responsible for
  2184   _cmsGen->freelistLock()->unlock();
  2185 }
  2187 bool CMSCollector::haveFreelistLocks() const {
  2188   // Check locks for all free lists in all generations that this
  2189   // collector is responsible for
  2190   assert_lock_strong(_cmsGen->freelistLock());
  2191   PRODUCT_ONLY(ShouldNotReachHere());
  2192   return true;
  2193 }
  2195 // A utility class that is used by the CMS collector to
  2196 // temporarily "release" the foreground collector from its
  2197 // usual obligation to wait for the background collector to
  2198 // complete an ongoing phase before proceeding.
  2199 class ReleaseForegroundGC: public StackObj {
  2200  private:
  2201   CMSCollector* _c;
  2202  public:
  2203   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
  2204     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
  2205     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2206     // allow a potentially blocked foreground collector to proceed
  2207     _c->_foregroundGCShouldWait = false;
  2208     if (_c->_foregroundGCIsActive) {
  2209       CGC_lock->notify();
  2210     }
  2211     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  2212            "Possible deadlock");
  2215   ~ReleaseForegroundGC() {
  2216     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
  2217     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2218     _c->_foregroundGCShouldWait = true;
  2219   }
  2220 };
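       // ReleaseForegroundGC is used as a scoped stack object around the
       // stop-the-world operations issued below, e.g. in collect_in_background():
       //
       //   {
       //     ReleaseForegroundGC x(this);        // lets a waiting FG collector run
       //     VM_CMS_Initial_Mark initial_mark_op(this);
       //     VMThread::execute(&initial_mark_op);
       //   }                                     // destructor re-asserts the wait flag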
  2222 // There are separate collect_in_background and collect_in_foreground because of
  2223 // the different locking requirements of the background collector and the
  2224 // foreground collector.  There was originally an attempt to share
  2225 // one "collect" method between the background collector and the foreground
  2226 // collector but the if-then-else required made it cleaner to have
  2227 // separate methods.
  2228 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
  2229   assert(Thread::current()->is_ConcurrentGC_thread(),
  2230     "A CMS asynchronous collection is only allowed on a CMS thread.");
  2232   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2233   {
  2234     bool safepoint_check = Mutex::_no_safepoint_check_flag;
  2235     MutexLockerEx hl(Heap_lock, safepoint_check);
  2236     FreelistLocker fll(this);
  2237     MutexLockerEx x(CGC_lock, safepoint_check);
  2238     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
  2239       // The foreground collector is active or we're
  2240       // not using asynchronous collections.  Skip this
  2241       // background collection.
  2242       assert(!_foregroundGCShouldWait, "Should be clear");
  2243       return;
  2244     } else {
  2245       assert(_collectorState == Idling, "Should be idling before start.");
  2246       _collectorState = InitialMarking;
  2247       register_gc_start(cause);
  2248       // Reset the expansion cause, now that we are about to begin
  2249       // a new cycle.
  2250       clear_expansion_cause();
  2252       // Clear the MetaspaceGC flag since a concurrent collection
  2253       // is starting but also clear it after the collection.
  2254       MetaspaceGC::set_should_concurrent_collect(false);
  2255     }
  2256     // Decide if we want to enable class unloading as part of the
  2257     // ensuing concurrent GC cycle.
  2258     update_should_unload_classes();
  2259     _full_gc_requested = false;           // acks all outstanding full gc requests
  2260     _full_gc_cause = GCCause::_no_gc;
  2261     // Signal that we are about to start a collection
  2262     gch->increment_total_full_collections();  // ... starting a collection cycle
  2263     _collection_count_start = gch->total_full_collections();
  2264   }
  2266   // Used for PrintGC
  2267   size_t prev_used;
  2268   if (PrintGC && Verbose) {
  2269     prev_used = _cmsGen->used(); // XXXPERM
  2270   }
  2272   // The change of the collection state is normally done at this level;
  2273   // the exceptions are phases that are executed while the world is
  2274   // stopped.  For those phases the change of state is done while the
  2275   // world is stopped.  For baton passing purposes this allows the
  2276   // background collector to finish the phase and change state atomically.
  2277   // The foreground collector cannot wait on a phase that is done
  2278   // while the world is stopped because the foreground collector already
  2279   // has the world stopped and would deadlock.
  2280   while (_collectorState != Idling) {
  2281     if (TraceCMSState) {
  2282       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
  2283         Thread::current(), _collectorState);
  2284     }
  2285     // The foreground collector
  2286     //   holds the Heap_lock throughout its collection.
  2287     //   holds the CMS token (but not the lock)
  2288     //     except while it is waiting for the background collector to yield.
  2289     //
  2290     // The foreground collector should be blocked (not for long)
  2291     //   if the background collector is about to start a phase
  2292     //   executed with world stopped.  If the background
  2293     //   collector has already started such a phase, the
  2294     //   foreground collector is blocked waiting for the
  2295     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
  2296     //   are executed in the VM thread.
  2297     //
  2298     // The locking order is
  2299     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
  2300     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
  2301     //   CMS token  (claimed in
  2302     //                stop_world_and_do() -->
  2303     //                  safepoint_synchronize() -->
  2304     //                    CMSThread::synchronize())
  2306     {
  2307       // Check if the FG collector wants us to yield.
  2308       CMSTokenSync x(true); // is cms thread
  2309       if (waitForForegroundGC()) {
  2310         // We yielded to a foreground GC, nothing more to be
  2311         // done this round.
  2312         assert(_foregroundGCShouldWait == false, "We set it to false in "
  2313                "waitForForegroundGC()");
  2314         if (TraceCMSState) {
  2315           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
  2316             " exiting collection CMS state %d",
  2317             Thread::current(), _collectorState);
  2318         }
  2319         return;
  2320       } else {
  2321         // The background collector can run but check to see if the
  2322         // foreground collector has done a collection while the
  2323         // background collector was waiting to get the CGC_lock
  2324         // above.  If yes, break so that _foregroundGCShouldWait
  2325         // is cleared before returning.
  2326         if (_collectorState == Idling) {
  2327           break;
  2328         }
  2329       }
  2330     }
  2332     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
  2333       "should be waiting");
  2335     switch (_collectorState) {
  2336       case InitialMarking:
  2337         {
  2338           ReleaseForegroundGC x(this);
  2339           stats().record_cms_begin();
  2340           VM_CMS_Initial_Mark initial_mark_op(this);
  2341           VMThread::execute(&initial_mark_op);
  2342         }
  2343         // The collector state may be any legal state at this point
  2344         // since the background collector may have yielded to the
  2345         // foreground collector.
  2346         break;
  2347       case Marking:
  2348         // initial marking in checkpointRootsInitialWork has been completed
  2349         if (markFromRoots(true)) { // we were successful
  2350           assert(_collectorState == Precleaning, "Collector state should "
  2351             "have changed");
  2352         } else {
  2353           assert(_foregroundGCIsActive, "Internal state inconsistency");
  2354         }
  2355         break;
  2356       case Precleaning:
  2357         if (UseAdaptiveSizePolicy) {
  2358           size_policy()->concurrent_precleaning_begin();
  2359         }
  2360         // marking from roots in markFromRoots has been completed
  2361         preclean();
  2362         if (UseAdaptiveSizePolicy) {
  2363           size_policy()->concurrent_precleaning_end();
  2364         }
  2365         assert(_collectorState == AbortablePreclean ||
  2366                _collectorState == FinalMarking,
  2367                "Collector state should have changed");
  2368         break;
  2369       case AbortablePreclean:
  2370         if (UseAdaptiveSizePolicy) {
  2371           size_policy()->concurrent_phases_resume();
  2372         }
  2373         abortable_preclean();
  2374         if (UseAdaptiveSizePolicy) {
  2375           size_policy()->concurrent_precleaning_end();
  2376         }
  2377         assert(_collectorState == FinalMarking, "Collector state should "
  2378           "have changed");
  2379         break;
  2380       case FinalMarking:
  2381         {
  2382           ReleaseForegroundGC x(this);
  2384           VM_CMS_Final_Remark final_remark_op(this);
  2385           VMThread::execute(&final_remark_op);
  2386         }
  2387         assert(_foregroundGCShouldWait, "block post-condition");
  2388         break;
  2389       case Sweeping:
  2390         if (UseAdaptiveSizePolicy) {
  2391           size_policy()->concurrent_sweeping_begin();
  2392         }
  2393         // final marking in checkpointRootsFinal has been completed
  2394         sweep(true);
  2395         assert(_collectorState == Resizing, "Collector state change "
  2396           "to Resizing must be done under the free_list_lock");
  2397         _full_gcs_since_conc_gc = 0;
  2399         // Stop the timers for adaptive size policy for the concurrent phases
  2400         if (UseAdaptiveSizePolicy) {
  2401           size_policy()->concurrent_sweeping_end();
  2402           size_policy()->concurrent_phases_end(gch->gc_cause(),
  2403                                              gch->prev_gen(_cmsGen)->capacity(),
  2404                                              _cmsGen->free());
  2405         }
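               // Editorial note: there is no break at the end of this case;
               // _collectorState is Resizing at this point, so control falls
               // through into the Resizing case below.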
  2407       case Resizing: {
  2408         // Sweeping has been completed...
  2409         // At this point the background collection has completed.
  2410         // Don't move the call to compute_new_size() down
  2411         // into code that might be executed if the background
  2412         // collection was preempted.
  2413         {
  2414           ReleaseForegroundGC x(this);   // unblock FG collection
  2415           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
  2416           CMSTokenSync        z(true);   // not strictly needed.
  2417           if (_collectorState == Resizing) {
  2418             compute_new_size();
  2419             save_heap_summary();
  2420             _collectorState = Resetting;
  2421           } else {
  2422             assert(_collectorState == Idling, "The state should only change"
  2423                    " because the foreground collector has finished the collection");
  2426         break;
  2427       }
  2428       case Resetting:
  2429         // CMS heap resizing has been completed
  2430         reset(true);
  2431         assert(_collectorState == Idling, "Collector state should "
  2432           "have changed");
  2434         MetaspaceGC::set_should_concurrent_collect(false);
  2436         stats().record_cms_end();
  2437         // Don't move the concurrent_phases_end() and compute_new_size()
  2438         // calls to here because a preempted background collection
  2439         // has its state set to "Resetting".
  2440         break;
  2441       case Idling:
  2442       default:
  2443         ShouldNotReachHere();
  2444         break;
  2445     }
  2446     if (TraceCMSState) {
  2447       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
  2448         Thread::current(), _collectorState);
  2449     }
  2450     assert(_foregroundGCShouldWait, "block post-condition");
  2451   }
  2453   // Should this be in gc_epilogue?
  2454   collector_policy()->counters()->update_counters();
  2456   {
  2457     // Clear _foregroundGCShouldWait and, in the event that the
  2458     // foreground collector is waiting, notify it, before
  2459     // returning.
  2460     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2461     _foregroundGCShouldWait = false;
  2462     if (_foregroundGCIsActive) {
  2463       CGC_lock->notify();
  2465     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  2466            "Possible deadlock");
  2468   if (TraceCMSState) {
  2469     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
  2470       " exiting collection CMS state %d",
  2471       Thread::current(), _collectorState);
  2473   if (PrintGC && Verbose) {
  2474     _cmsGen->print_heap_change(prev_used);
  2478 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
  2479   if (!_cms_start_registered) {
  2480     register_gc_start(cause);
  2484 void CMSCollector::register_gc_start(GCCause::Cause cause) {
  2485   _cms_start_registered = true;
  2486   _gc_timer_cm->register_gc_start(os::elapsed_counter());
  2487   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
  2490 void CMSCollector::register_gc_end() {
  2491   if (_cms_start_registered) {
  2492     report_heap_summary(GCWhen::AfterGC);
  2494     _gc_timer_cm->register_gc_end(os::elapsed_counter());
  2495     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
  2496     _cms_start_registered = false;
  2500 void CMSCollector::save_heap_summary() {
  2501   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2502   _last_heap_summary = gch->create_heap_summary();
  2503   _last_metaspace_summary = gch->create_metaspace_summary();
  2506 void CMSCollector::report_heap_summary(GCWhen::Type when) {
  2507   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
  2510 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
  2511   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
  2512          "Foreground collector should be waiting, not executing");
  2513   assert(Thread::current()->is_VM_thread(), "A foreground collection "
  2514     "may only be done by the VM Thread with the world stopped");
  2515   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  2516          "VM thread should have CMS token");
  2518   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
  2519     true, NULL);)
  2520   if (UseAdaptiveSizePolicy) {
  2521     size_policy()->ms_collection_begin();
  2523   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
  2525   HandleMark hm;  // Discard invalid handles created during verification
  2527   if (VerifyBeforeGC &&
  2528       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2529     Universe::verify();
  2532   // Snapshot the soft reference policy to be used in this collection cycle.
  2533   ref_processor()->setup_policy(clear_all_soft_refs);
  2535   bool init_mark_was_synchronous = false; // until proven otherwise
  2536   while (_collectorState != Idling) {
  2537     if (TraceCMSState) {
  2538       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
  2539         Thread::current(), _collectorState);
  2541     switch (_collectorState) {
  2542       case InitialMarking:
  2543         register_foreground_gc_start(cause);
  2544         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
  2545         checkpointRootsInitial(false);
  2546         assert(_collectorState == Marking, "Collector state should have changed"
  2547           " within checkpointRootsInitial()");
  2548         break;
  2549       case Marking:
  2550         // initial marking in checkpointRootsInitialWork has been completed
  2551         if (VerifyDuringGC &&
  2552             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2553           Universe::verify("Verify before initial mark: ");
  2556           bool res = markFromRoots(false);
  2557           assert(res && _collectorState == FinalMarking, "Collector state should "
  2558             "have changed");
  2559           break;
  2561       case FinalMarking:
  2562         if (VerifyDuringGC &&
  2563             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2564           Universe::verify("Verify before re-mark: ");
  2566         checkpointRootsFinal(false, clear_all_soft_refs,
  2567                              init_mark_was_synchronous);
  2568         assert(_collectorState == Sweeping, "Collector state should "
  2569           "have changed within checkpointRootsFinal()");
  2570         break;
  2571       case Sweeping:
  2572         // final marking in checkpointRootsFinal has been completed
  2573         if (VerifyDuringGC &&
  2574             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2575           Universe::verify("Verify before sweep: ");
  2577         sweep(false);
  2578         assert(_collectorState == Resizing, "Incorrect state");
  2579         break;
  2580       case Resizing: {
  2581         // Sweeping has been completed; the actual resize in this case
  2582         // is done separately; nothing to be done in this state.
  2583         _collectorState = Resetting;
  2584         break;
  2586       case Resetting:
  2587         // The heap has been resized.
  2588         if (VerifyDuringGC &&
  2589             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2590           Universe::verify("Verify before reset: ");
  2592         save_heap_summary();
  2593         reset(false);
  2594         assert(_collectorState == Idling, "Collector state should "
  2595           "have changed");
  2596         break;
  2597       case Precleaning:
  2598       case AbortablePreclean:
  2599         // Elide the preclean phase
  2600         _collectorState = FinalMarking;
  2601         break;
  2602       default:
  2603         ShouldNotReachHere();
  2605     if (TraceCMSState) {
  2606       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
  2607         Thread::current(), _collectorState);
  2611   if (UseAdaptiveSizePolicy) {
  2612     GenCollectedHeap* gch = GenCollectedHeap::heap();
  2613     size_policy()->ms_collection_end(gch->gc_cause());
  2616   if (VerifyAfterGC &&
  2617       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2618     Universe::verify();
  2620   if (TraceCMSState) {
  2621     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
  2622       " exiting collection CMS state %d",
  2623       Thread::current(), _collectorState);
  2627 bool CMSCollector::waitForForegroundGC() {
  2628   bool res = false;
  2629   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  2630          "CMS thread should have CMS token");
  2631   // Block the foreground collector until the
  2632   // background collector decides whether to
  2633   // yield.
  2634   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2635   _foregroundGCShouldWait = true;
  2636   if (_foregroundGCIsActive) {
  2637     // The background collector yields to the
  2638     // foreground collector and returns a value
  2639     // indicating that it has yielded.  The foreground
  2640     // collector can proceed.
  2641     res = true;
  2642     _foregroundGCShouldWait = false;
  2643     ConcurrentMarkSweepThread::clear_CMS_flag(
  2644       ConcurrentMarkSweepThread::CMS_cms_has_token);
  2645     ConcurrentMarkSweepThread::set_CMS_flag(
  2646       ConcurrentMarkSweepThread::CMS_cms_wants_token);
  2647     // Get a possibly blocked foreground thread going
  2648     CGC_lock->notify();
  2649     if (TraceCMSState) {
  2650       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
  2651         Thread::current(), _collectorState);
  2653     while (_foregroundGCIsActive) {
  2654       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  2656     ConcurrentMarkSweepThread::set_CMS_flag(
  2657       ConcurrentMarkSweepThread::CMS_cms_has_token);
  2658     ConcurrentMarkSweepThread::clear_CMS_flag(
  2659       ConcurrentMarkSweepThread::CMS_cms_wants_token);
  2661   if (TraceCMSState) {
  2662     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
  2663       Thread::current(), _collectorState);
  2665   return res;
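
// Illustrative sketch (not compiled): the handshake above is a classic
// monitor pattern -- publish a flag under CGC_lock, wake any waiter, then
// block until the peer clears its own flag. The same shape, with standard
// C++11 primitives standing in for CGC_lock; all names below are
// hypothetical, not HotSpot APIs.
#if 0
#include <condition_variable>
#include <mutex>

struct HandshakeSketch {
  std::mutex              lock;             // stand-in for CGC_lock
  std::condition_variable cv;
  bool fg_is_active   = false;              // cf. _foregroundGCIsActive
  bool fg_should_wait = false;              // cf. _foregroundGCShouldWait

  // Background side, cf. waitForForegroundGC(): yield to an active
  // foreground collection and block until it completes.
  bool yield_to_foreground() {
    std::unique_lock<std::mutex> x(lock);
    fg_should_wait = true;
    if (!fg_is_active) return false;        // nothing to yield to
    fg_should_wait = false;
    cv.notify_all();                        // get a blocked foreground thread going
    cv.wait(x, [this]{ return !fg_is_active; });
    return true;                            // we yielded
  }

  // Foreground side: signal completion and wake the background thread.
  void foreground_done() {
    std::lock_guard<std::mutex> g(lock);
    fg_is_active = false;
    cv.notify_all();
  }
};
#endif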
  2668 // Because of the need to lock the free lists and other structures in
  2669 // the collector, common to all the generations that the collector is
  2670 // collecting, we need the gc_prologues of individual CMS generations
  2671 // to delegate to their collector. It may have been simpler had the
  2672 // current infrastructure allowed one to call a prologue on a
  2673 // collector. In the absence of that we have the generation's
  2674 // prologue delegate to the collector, which delegates back
  2675 // some "local" work to a worker method in the individual generations
  2676 // that it's responsible for collecting, while itself doing any
  2677 // work common to all generations it's responsible for. A similar
  2678 // comment applies to the gc_epilogue()s.
  2679 // The role of the variable _between_prologue_and_epilogue is to
  2680 // enforce the invocation protocol.
  2681 void CMSCollector::gc_prologue(bool full) {
  2682   // Call gc_prologue_work() for the CMSGen
  2683   // we are responsible for.
  2685   // The following locking discipline assumes that we are only called
  2686   // when the world is stopped.
  2687   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
  2689   // The CMSCollector prologue must call the gc_prologues for the
  2690   // "generations" that it's responsible
  2691   // for.
  2693   assert(   Thread::current()->is_VM_thread()
  2694          || (   CMSScavengeBeforeRemark
  2695              && Thread::current()->is_ConcurrentGC_thread()),
  2696          "Incorrect thread type for prologue execution");
  2698   if (_between_prologue_and_epilogue) {
  2699     // We have already been invoked; this is a gc_prologue delegation
  2700     // from yet another CMS generation that we are responsible for, just
  2701     // ignore it since all relevant work has already been done.
  2702     return;
  2705   // set a bit saying prologue has been called; cleared in epilogue
  2706   _between_prologue_and_epilogue = true;
  2707   // Claim locks for common data structures, then call gc_prologue_work()
  2708   // for each CMSGen.
  2710   getFreelistLocks();   // gets free list locks on constituent spaces
  2711   bitMapLock()->lock_without_safepoint_check();
  2713   // Should call gc_prologue_work() for all cms gens we are responsible for
  2714   bool duringMarking =    _collectorState >= Marking
  2715                          && _collectorState < Sweeping;
  2717   // The young collections clear the modified oops state, which tells if
  2718   // there are any modified oops in the class. The remark phase also needs
  2719   // that information. Tell the young collection to save the union of all
  2720   // modified klasses.
  2721   if (duringMarking) {
  2722     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
  2725   bool registerClosure = duringMarking;
  2727   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
  2728                                                &_modUnionClosurePar
  2729                                                : &_modUnionClosure;
  2730   _cmsGen->gc_prologue_work(full, registerClosure, muc);
  2732   if (!full) {
  2733     stats().record_gc0_begin();
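
// Illustrative sketch (not compiled): the delegation protocol described in
// the comment above gc_prologue() reduces to the shape below, assuming one
// collector managing one generation. The types and names here are
// hypothetical, not the real class hierarchy.
#if 0
struct SketchGeneration;

struct SketchCollector {
  bool between_prologue_and_epilogue;
  SketchGeneration* gen;
  SketchCollector() : between_prologue_and_epilogue(false), gen(NULL) {}
  void gc_prologue(bool full);
};

struct SketchGeneration {
  SketchCollector* collector;
  // Public entry point: delegate up to the collector.
  void gc_prologue(bool full)      { collector->gc_prologue(full); }
  // "Local" work, called back by the collector exactly once per cycle.
  void gc_prologue_work(bool full) { /* generation-local setup */ }
};

void SketchCollector::gc_prologue(bool full) {
  if (between_prologue_and_epilogue) return; // repeat delegation; already done
  between_prologue_and_epilogue = true;      // cleared again in gc_epilogue()
  // ... claim locks on shared data structures here ...
  gen->gc_prologue_work(full);               // delegate back down
}
#endif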
  2737 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
  2739   _capacity_at_prologue = capacity();
  2740   _used_at_prologue = used();
  2742   // Delegate to CMScollector which knows how to coordinate between
  2743   // this and any other CMS generations that it is responsible for
  2744   // collecting.
  2745   collector()->gc_prologue(full);
  2748 // This is a "private" interface for use by this generation's CMSCollector.
  2749 // Not to be called directly by any other entity (for instance,
  2750 // GenCollectedHeap, which calls the "public" gc_prologue method above).
  2751 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
  2752   bool registerClosure, ModUnionClosure* modUnionClosure) {
  2753   assert(!incremental_collection_failed(), "Shouldn't be set yet");
  2754   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
  2755     "Should be NULL");
  2756   if (registerClosure) {
  2757     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
  2759   cmsSpace()->gc_prologue();
  2760   // Clear stat counters
  2761   NOT_PRODUCT(
  2762     assert(_numObjectsPromoted == 0, "check");
  2763     assert(_numWordsPromoted   == 0, "check");
  2764     if (Verbose && PrintGC) {
  2765       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
  2766                           SIZE_FORMAT" bytes concurrently",
  2767       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
  2769     _numObjectsAllocated = 0;
  2770     _numWordsAllocated   = 0;
  2774 void CMSCollector::gc_epilogue(bool full) {
  2775   // The following locking discipline assumes that we are only called
  2776   // when the world is stopped.
  2777   assert(SafepointSynchronize::is_at_safepoint(),
  2778          "world is stopped assumption");
  2780   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  2781   // if linear allocation blocks need to be appropriately marked to allow
  2782   // the blocks to be parsable. We also check here whether we need to nudge the
  2783   // CMS collector thread to start a new cycle (if it's not already active).
  2784   assert(   Thread::current()->is_VM_thread()
  2785          || (   CMSScavengeBeforeRemark
  2786              && Thread::current()->is_ConcurrentGC_thread()),
  2787          "Incorrect thread type for epilogue execution");
  2789   if (!_between_prologue_and_epilogue) {
  2790     // We have already been invoked; this is a gc_epilogue delegation
  2791     // from yet another CMS generation that we are responsible for, just
  2792     // ignore it since all relevant work has already been done.
  2793     return;
  2795   assert(haveFreelistLocks(), "must have freelist locks");
  2796   assert_lock_strong(bitMapLock());
  2798   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
  2800   _cmsGen->gc_epilogue_work(full);
  2802   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
  2803     // in case sampling was not already enabled, enable it
  2804     _start_sampling = true;
  2806   // reset _eden_chunk_array so sampling starts afresh
  2807   _eden_chunk_index = 0;
  2809   size_t cms_used   = _cmsGen->cmsSpace()->used();
  2811   // update performance counters - this uses a special version of
  2812   // update_counters() that allows the utilization to be passed as a
  2813   // parameter, avoiding multiple calls to used().
  2814   //
  2815   _cmsGen->update_counters(cms_used);
  2817   if (CMSIncrementalMode) {
  2818     icms_update_allocation_limits();
  2821   bitMapLock()->unlock();
  2822   releaseFreelistLocks();
  2824   if (!CleanChunkPoolAsync) {
  2825     Chunk::clean_chunk_pool();
  2828   set_did_compact(false);
  2829   _between_prologue_and_epilogue = false;  // ready for next cycle
  2832 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
  2833   collector()->gc_epilogue(full);
  2835   // Also reset promotion tracking in par gc thread states.
  2836   if (CollectedHeap::use_parallel_gc_threads()) {
  2837     for (uint i = 0; i < ParallelGCThreads; i++) {
  2838       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
  2843 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
  2844   assert(!incremental_collection_failed(), "Should have been cleared");
  2845   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
  2846   cmsSpace()->gc_epilogue();
  2847   // Print stat counters
  2848   NOT_PRODUCT(
  2849     assert(_numObjectsAllocated == 0, "check");
  2850     assert(_numWordsAllocated == 0, "check");
  2851     if (Verbose && PrintGC) {
  2852       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
  2853                           SIZE_FORMAT" bytes",
  2854                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
  2856     _numObjectsPromoted = 0;
  2857     _numWordsPromoted   = 0;
  2860   if (PrintGC && Verbose) {
  2861     // The call down the chain through contiguous_available() needs the
  2862     // freelistLock, so print this out before releasing that lock.
  2863     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
  2864                         contiguous_available());
  2868 #ifndef PRODUCT
  2869 bool CMSCollector::have_cms_token() {
  2870   Thread* thr = Thread::current();
  2871   if (thr->is_VM_thread()) {
  2872     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
  2873   } else if (thr->is_ConcurrentGC_thread()) {
  2874     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
  2875   } else if (thr->is_GC_task_thread()) {
  2876     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
  2877            ParGCRareEvent_lock->owned_by_self();
  2879   return false;
  2881 #endif
  2883 // Check reachability of the given heap address in CMS generation,
  2884 // treating all other generations as roots.
  2885 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
  2886   // We could "guarantee" below, rather than assert, but I'll
  2887   // leave these as "asserts" so that an adventurous debugger
  2888   // could try this in the product build provided some subset of
  2889   // the conditions were met, and provided they were interested in the
  2890   // results and knew that the computation below wouldn't interfere
  2891   // with other concurrent computations mutating the structures
  2892   // being read or written.
  2893   assert(SafepointSynchronize::is_at_safepoint(),
  2894          "Else mutations in object graph will make answer suspect");
  2895   assert(have_cms_token(), "Should hold cms token");
  2896   assert(haveFreelistLocks(), "must hold free list locks");
  2897   assert_lock_strong(bitMapLock());
  2899   // Clear the marking bit map array before starting, but, just
  2900   // for kicks, first report if the given address is already marked
  2901   gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
  2902                 _markBitMap.isMarked(addr) ? "" : " not");
  2904   if (verify_after_remark()) {
  2905     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  2906     bool result = verification_mark_bm()->isMarked(addr);
  2907     gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
  2908                            result ? "IS" : "is NOT");
  2909     return result;
  2910   } else {
  2911     gclog_or_tty->print_cr("Could not compute result");
  2912     return false;
  2917 void
  2918 CMSCollector::print_on_error(outputStream* st) {
  2919   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
  2920   if (collector != NULL) {
  2921     CMSBitMap* bitmap = &collector->_markBitMap;
  2922     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
  2923     bitmap->print_on_error(st, " Bits: ");
  2925     st->cr();
  2927     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
  2928     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
  2929     mut_bitmap->print_on_error(st, " Bits: ");
  2933 ////////////////////////////////////////////////////////
  2934 // CMS Verification Support
  2935 ////////////////////////////////////////////////////////
  2936 // Following the remark phase, the following invariant
  2937 // should hold -- each object in the CMS heap which is
  2938 // marked in markBitMap() should be marked in the verification_mark_bm().
  2940 class VerifyMarkedClosure: public BitMapClosure {
  2941   CMSBitMap* _marks;
  2942   bool       _failed;
  2944  public:
  2945   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
  2947   bool do_bit(size_t offset) {
  2948     HeapWord* addr = _marks->offsetToHeapWord(offset);
  2949     if (!_marks->isMarked(addr)) {
  2950       oop(addr)->print_on(gclog_or_tty);
  2951       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
  2952       _failed = true;
  2954     return true;
  2957   bool failed() { return _failed; }
  2958 };
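
// Illustrative sketch (not compiled): do_bit() above deliberately returns
// true after a failure so that iteration continues and every mismatched
// object is printed, not just the first. Its intended use (cf.
// verify_after_remark_work_1/2 below) is a subset check of this shape:
#if 0
void check_marks_subset(CMSBitMap* verification_bm, CMSBitMap* cms_bm) {
  VerifyMarkedClosure vcl(cms_bm);  // flags bits set in verification_bm
  verification_bm->iterate(&vcl);   //   but clear in cms_bm
  guarantee(!vcl.failed(), "verification marks must be a subset of CMS marks");
}
#endif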
  2960 bool CMSCollector::verify_after_remark(bool silent) {
  2961   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
  2962   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  2963   static bool init = false;
  2965   assert(SafepointSynchronize::is_at_safepoint(),
  2966          "Else mutations in object graph will make answer suspect");
  2967   assert(have_cms_token(),
  2968          "Else there may be mutual interference in use of "
  2969          "verification data structures");
  2970   assert(_collectorState > Marking && _collectorState <= Sweeping,
  2971          "Else marking info checked here may be obsolete");
  2972   assert(haveFreelistLocks(), "must hold free list locks");
  2973   assert_lock_strong(bitMapLock());
  2976   // Allocate marking bit map if not already allocated
  2977   if (!init) { // first time
  2978     if (!verification_mark_bm()->allocate(_span)) {
  2979       return false;
  2981     init = true;
  2984   assert(verification_mark_stack()->isEmpty(), "Should be empty");
  2986   // Turn off refs discovery -- so we will be tracing through refs.
  2987   // This is as intended, because by this time
  2988   // GC must already have cleared any refs that need to be cleared,
  2989   // and traced those that need to be marked; moreover,
  2990   // the marking done here is not going to interfere in any
  2991   // way with the marking information used by GC.
  2992   NoRefDiscovery no_discovery(ref_processor());
  2994   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  2996   // Clear any marks from a previous round
  2997   verification_mark_bm()->clear_all();
  2998   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
  2999   verify_work_stacks_empty();
  3001   GenCollectedHeap* gch = GenCollectedHeap::heap();
  3002   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  3003   // Update the saved marks which may affect the root scans.
  3004   gch->save_marks();
  3006   if (CMSRemarkVerifyVariant == 1) {
  3007     // In this first variant of verification, we complete
  3008     // all marking, then check if the new marks-vector is
  3009     // a subset of the CMS marks-vector.
  3010     verify_after_remark_work_1();
  3011   } else if (CMSRemarkVerifyVariant == 2) {
  3012     // In this second variant of verification, we flag an error
  3013     // (i.e. an object reachable in the new marks-vector not reachable
  3014     // in the CMS marks-vector) immediately, also indicating the
  3015     // identity of an object (A) that references the unmarked object (B) --
  3016     // presumably, a mutation to A failed to be picked up by preclean/remark?
  3017     verify_after_remark_work_2();
  3018   } else {
  3019     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
  3020             CMSRemarkVerifyVariant);
  3022   if (!silent) gclog_or_tty->print(" done] ");
  3023   return true;
  3026 void CMSCollector::verify_after_remark_work_1() {
  3027   ResourceMark rm;
  3028   HandleMark  hm;
  3029   GenCollectedHeap* gch = GenCollectedHeap::heap();
  3031   // Get a clear set of claim bits for the strong roots processing to work with.
  3032   ClassLoaderDataGraph::clear_claimed_marks();
  3034   // Mark from roots one level into CMS
  3035   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
  3036   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  3038   gch->gen_process_strong_roots(_cmsGen->level(),
  3039                                 true,   // younger gens are roots
  3040                                 true,   // activate StrongRootsScope
  3041                                 false,  // not scavenging
  3042                                 SharedHeap::ScanningOption(roots_scanning_options()),
  3043                                 &notOlder,
  3044                                 true,   // walk code active on stacks
  3045                                 NULL,
  3046                                 NULL); // SSS: Provide correct closure
  3048   // Now mark from the roots
  3049   MarkFromRootsClosure markFromRootsClosure(this, _span,
  3050     verification_mark_bm(), verification_mark_stack(),
  3051     false /* don't yield */, true /* verifying */);
  3052   assert(_restart_addr == NULL, "Expected pre-condition");
  3053   verification_mark_bm()->iterate(&markFromRootsClosure);
  3054   while (_restart_addr != NULL) {
  3055     // Deal with stack overflow: by restarting at the indicated
  3056     // address.
  3057     HeapWord* ra = _restart_addr;
  3058     markFromRootsClosure.reset(ra);
  3059     _restart_addr = NULL;
  3060     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  3062   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  3063   verify_work_stacks_empty();
  3065   // Marking completed -- now verify that each bit marked in
  3066   // verification_mark_bm() is also marked in markBitMap(); flag all
  3067   // errors by printing corresponding objects.
  3068   VerifyMarkedClosure vcl(markBitMap());
  3069   verification_mark_bm()->iterate(&vcl);
  3070   if (vcl.failed()) {
  3071     gclog_or_tty->print("Verification failed");
  3072     Universe::heap()->print_on(gclog_or_tty);
  3073     fatal("CMS: failed marking verification after remark");
  3077 class VerifyKlassOopsKlassClosure : public KlassClosure {
  3078   class VerifyKlassOopsClosure : public OopClosure {
  3079     CMSBitMap* _bitmap;
  3080    public:
  3081     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
  3082     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
  3083     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  3084   } _oop_closure;
  3085  public:
  3086   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
  3087   void do_klass(Klass* k) {
  3088     k->oops_do(&_oop_closure);
  3090 };
  3092 void CMSCollector::verify_after_remark_work_2() {
  3093   ResourceMark rm;
  3094   HandleMark  hm;
  3095   GenCollectedHeap* gch = GenCollectedHeap::heap();
  3097   // Get a clear set of claim bits for the strong roots processing to work with.
  3098   ClassLoaderDataGraph::clear_claimed_marks();
  3100   // Mark from roots one level into CMS
  3101   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
  3102                                      markBitMap());
  3103   CMKlassClosure klass_closure(&notOlder);
  3105   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  3106   gch->gen_process_strong_roots(_cmsGen->level(),
  3107                                 true,   // younger gens are roots
  3108                                 true,   // activate StrongRootsScope
  3109                                 false,  // not scavenging
  3110                                 SharedHeap::ScanningOption(roots_scanning_options()),
  3111                                 &notOlder,
  3112                                 true,   // walk code active on stacks
  3113                                 NULL,
  3114                                 &klass_closure);
  3116   // Now mark from the roots
  3117   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
  3118     verification_mark_bm(), markBitMap(), verification_mark_stack());
  3119   assert(_restart_addr == NULL, "Expected pre-condition");
  3120   verification_mark_bm()->iterate(&markFromRootsClosure);
  3121   while (_restart_addr != NULL) {
  3122     // Deal with stack overflow: by restarting at the indicated
  3123     // address.
  3124     HeapWord* ra = _restart_addr;
  3125     markFromRootsClosure.reset(ra);
  3126     _restart_addr = NULL;
  3127     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  3129   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  3130   verify_work_stacks_empty();
  3132   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
  3133   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
  3135   // Marking completed -- now verify that each bit marked in
  3136   // verification_mark_bm() is also marked in markBitMap(); flag all
  3137   // errors by printing corresponding objects.
  3138   VerifyMarkedClosure vcl(markBitMap());
  3139   verification_mark_bm()->iterate(&vcl);
  3140   assert(!vcl.failed(), "Else verification above should not have succeeded");
  3143 void ConcurrentMarkSweepGeneration::save_marks() {
  3144   // delegate to CMS space
  3145   cmsSpace()->save_marks();
  3146   for (uint i = 0; i < ParallelGCThreads; i++) {
  3147     _par_gc_thread_states[i]->promo.startTrackingPromotions();
  3151 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  3152   return cmsSpace()->no_allocs_since_save_marks();
  3155 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
  3157 void ConcurrentMarkSweepGeneration::                            \
  3158 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  3159   cl->set_generation(this);                                     \
  3160   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
  3161   cl->reset_generation();                                       \
  3162   save_marks();                                                 \
  3165 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
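
// Illustrative sketch (not compiled): each expansion of the macro above
// defines one (possibly non-virtual) iteration method. For a hypothetical
// closure type FooClosure with suffix _nv, the expansion is equivalent to:
#if 0
void ConcurrentMarkSweepGeneration::
oop_since_save_marks_iterate_nv(FooClosure* cl) {
  cl->set_generation(this);
  cmsSpace()->oop_since_save_marks_iterate_nv(cl);
  cl->reset_generation();
  save_marks();
}
#endif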
  3167 void
  3168 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  3169   cl->set_generation(this);
  3170   younger_refs_in_space_iterate(_cmsSpace, cl);
  3171   cl->reset_generation();
  3174 void
  3175 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  3176   if (freelistLock()->owned_by_self()) {
  3177     Generation::oop_iterate(mr, cl);
  3178   } else {
  3179     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3180     Generation::oop_iterate(mr, cl);
  3184 void
  3185 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
  3186   if (freelistLock()->owned_by_self()) {
  3187     Generation::oop_iterate(cl);
  3188   } else {
  3189     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3190     Generation::oop_iterate(cl);
  3194 void
  3195 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
  3196   if (freelistLock()->owned_by_self()) {
  3197     Generation::object_iterate(cl);
  3198   } else {
  3199     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3200     Generation::object_iterate(cl);
  3204 void
  3205 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
  3206   if (freelistLock()->owned_by_self()) {
  3207     Generation::safe_object_iterate(cl);
  3208   } else {
  3209     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3210     Generation::safe_object_iterate(cl);
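
// Illustrative sketch (not compiled): the iterate methods above all repeat
// a "lock unless already held" idiom, because some callers already own the
// freelistLock. The common shape, as a hypothetical helper:
#if 0
template <typename Body>
void with_freelist_lock(Mutex* fl, Body body) {
  if (fl->owned_by_self()) {
    body();                          // caller already holds the lock
  } else {
    MutexLockerEx x(fl, Mutex::_no_safepoint_check_flag);
    body();
  }
}
#endif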
  3214 void
  3215 ConcurrentMarkSweepGeneration::post_compact() {
  3218 void
  3219 ConcurrentMarkSweepGeneration::prepare_for_verify() {
  3220   // Fix the linear allocation blocks to look like free blocks.
  3222   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  3223   // are not called when the heap is verified during universe initialization and
  3224   // at vm shutdown.
  3225   if (freelistLock()->owned_by_self()) {
  3226     cmsSpace()->prepare_for_verify();
  3227   } else {
  3228     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
  3229     cmsSpace()->prepare_for_verify();
  3233 void
  3234 ConcurrentMarkSweepGeneration::verify() {
  3235   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  3236   // are not called when the heap is verified during universe initialization and
  3237   // at vm shutdown.
  3238   if (freelistLock()->owned_by_self()) {
  3239     cmsSpace()->verify();
  3240   } else {
  3241     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
  3242     cmsSpace()->verify();
  3246 void CMSCollector::verify() {
  3247   _cmsGen->verify();
  3250 #ifndef PRODUCT
  3251 bool CMSCollector::overflow_list_is_empty() const {
  3252   assert(_num_par_pushes >= 0, "Inconsistency");
  3253   if (_overflow_list == NULL) {
  3254     assert(_num_par_pushes == 0, "Inconsistency");
  3256   return _overflow_list == NULL;
  3259 // The methods verify_work_stacks_empty() and verify_overflow_empty()
  3260 // merely consolidate assertion checks that appear to occur together frequently.
  3261 void CMSCollector::verify_work_stacks_empty() const {
  3262   assert(_markStack.isEmpty(), "Marking stack should be empty");
  3263   assert(overflow_list_is_empty(), "Overflow list should be empty");
  3266 void CMSCollector::verify_overflow_empty() const {
  3267   assert(overflow_list_is_empty(), "Overflow list should be empty");
  3268   assert(no_preserved_marks(), "No preserved marks");
  3270 #endif // PRODUCT
  3272 // Decide if we want to enable class unloading as part of the
  3273 // ensuing concurrent GC cycle. We will collect and
  3274 // unload classes if it's the case that:
  3275 // (1) an explicit gc request has been made and the flag
  3276 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
  3277 // (2) (a) class unloading is enabled at the command line, and
  3278 //     (b) old gen is getting really full
  3279 // NOTE: Provided there is no change in the state of the heap between
  3280 // calls to this method, it should have idempotent results. Moreover,
  3281 // its results should be monotonically increasing (i.e. going from 0 to 1,
  3282 // but not 1 to 0) between successive calls between which the heap was
  3283 // not collected. For the implementation below, it must thus rely on
  3284 // the property that concurrent_cycles_since_last_unload()
  3285 // will not decrease unless a collection cycle happened and that
  3286 // _cmsGen->is_too_full() is
  3287 // itself also monotonic in that sense. See check_monotonicity()
  3288 // below.
  3289 void CMSCollector::update_should_unload_classes() {
  3290   _should_unload_classes = false;
  3291   // Condition 1 above
  3292   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
  3293     _should_unload_classes = true;
  3294   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
  3295     // Condition 2.b above (either disjunct below satisfies it)
  3296     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
  3297                               CMSClassUnloadingMaxInterval)
  3298                            || _cmsGen->is_too_full();
  3302 bool ConcurrentMarkSweepGeneration::is_too_full() const {
  3303   bool res = should_concurrent_collect();
  3304   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
  3305   return res;
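
// Worked example (illustrative; assumes CMSIsTooFullPercentage is at its
// default of 98 and that should_concurrent_collect() already returned true):
//   occupancy() == 0.985  ->  0.985 > 98/100.0        ->  is_too_full() == true
//   occupancy() == 0.970  ->  0.970 > 0.98 is false   ->  is_too_full() == false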
  3308 void CMSCollector::setup_cms_unloading_and_verification_state() {
  3309   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
  3310                              || VerifyBeforeExit;
  3311   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
  3313   if (should_unload_classes()) {   // Should unload classes this cycle
  3314     remove_root_scanning_option(rso);  // Shrink the root set appropriately
  3315     set_verifying(should_verify);    // Set verification state for this cycle
  3316     return;                            // Nothing else needs to be done at this time
  3319   // Not unloading classes this cycle
  3320   assert(!should_unload_classes(), "Inconsistency!");
  3321   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
  3322     // Include symbols, strings and code cache elements to prevent their resurrection.
  3323     add_root_scanning_option(rso);
  3324     set_verifying(true);
  3325   } else if (verifying() && !should_verify) {
  3326     // We were verifying, but some verification flags got disabled.
  3327     set_verifying(false);
  3328     // Exclude symbols, strings and code cache elements from root scanning to
  3329     // reduce IM and RM pauses.
  3330     remove_root_scanning_option(rso);
  3335 #ifndef PRODUCT
  3336 HeapWord* CMSCollector::block_start(const void* p) const {
  3337   const HeapWord* addr = (HeapWord*)p;
  3338   if (_span.contains(p)) {
  3339     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
  3340       return _cmsGen->cmsSpace()->block_start(p);
  3343   return NULL;
  3345 #endif
  3347 HeapWord*
  3348 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
  3349                                                    bool   tlab,
  3350                                                    bool   parallel) {
  3351   CMSSynchronousYieldRequest yr;
  3352   assert(!tlab, "Can't deal with TLAB allocation");
  3353   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3354   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
  3355     CMSExpansionCause::_satisfy_allocation);
  3356   if (GCExpandToAllocateDelayMillis > 0) {
  3357     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  3359   return have_lock_and_allocate(word_size, tlab);
  3362 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
  3363 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
  3364 // to CardGeneration and share it...
  3365 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
  3366   return CardGeneration::expand(bytes, expand_bytes);
  3369 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
  3370   CMSExpansionCause::Cause cause)
  3373   bool success = expand(bytes, expand_bytes);
  3375   // remember why we expanded; this information is used
  3376   // by shouldConcurrentCollect() when making decisions on whether to start
  3377   // a new CMS cycle.
  3378   if (success) {
  3379     set_expansion_cause(cause);
  3380     if (PrintGCDetails && Verbose) {
  3381       gclog_or_tty->print_cr("Expanded CMS gen for %s",
  3382         CMSExpansionCause::to_string(cause));
  3387 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  3388   HeapWord* res = NULL;
  3389   MutexLocker x(ParGCRareEvent_lock);
  3390   while (true) {
  3391     // Expansion by some other thread might make alloc OK now:
  3392     res = ps->lab.alloc(word_sz);
  3393     if (res != NULL) return res;
  3394     // If there's not enough expansion space available, give up.
  3395     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
  3396       return NULL;
  3398     // Otherwise, we try expansion.
  3399     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
  3400       CMSExpansionCause::_allocate_par_lab);
  3401     // Now go around the loop and try alloc again;
  3402     // A competing par_promote might beat us to the expansion space,
  3403     // so we may go around the loop again if promotion fails again.
  3404     if (GCExpandToAllocateDelayMillis > 0) {
  3405       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  3411 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  3412   PromotionInfo* promo) {
  3413   MutexLocker x(ParGCRareEvent_lock);
  3414   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  3415   while (true) {
  3416     // Expansion by some other thread might make alloc OK now:
  3417     if (promo->ensure_spooling_space()) {
  3418       assert(promo->has_spooling_space(),
  3419              "Post-condition of successful ensure_spooling_space()");
  3420       return true;
  3422     // If there's not enough expansion space available, give up.
  3423     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
  3424       return false;
  3426     // Otherwise, we try expansion.
  3427     expand(refill_size_bytes, MinHeapDeltaBytes,
  3428       CMSExpansionCause::_allocate_par_spooling_space);
  3429     // Now go around the loop and try alloc again;
  3430     // A competing allocation might beat us to the expansion space,
  3431     // so we may go around the loop again if allocation fails again.
  3432     if (GCExpandToAllocateDelayMillis > 0) {
  3433       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
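
// Illustrative sketch (not compiled): expand_and_par_lab_allocate() and
// expand_and_ensure_spooling_space() above share one retry shape: attempt,
// give up if no uncommitted space remains, else expand and loop (with an
// optional delay so competing threads can use the new space). As a
// self-contained sketch with hypothetical callables:
#if 0
template <typename AllocFn, typename FreeFn, typename ExpandFn, typename SleepFn>
void* expand_and_retry(AllocFn try_alloc, FreeFn uncommitted,
                       ExpandFn expand_some, SleepFn maybe_sleep,
                       size_t need_bytes) {
  while (true) {
    void* res = try_alloc();
    if (res != NULL) return res;                // a competing expansion may have helped
    if (uncommitted() < need_bytes) return NULL; // no headroom left: give up
    expand_some(need_bytes);                    // grow the committed region
    maybe_sleep();                              // back off; let others make progress
  }
}
#endif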
  3439 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  3440   assert_locked_or_safepoint(ExpandHeap_lock);
  3441   // Shrink committed space
  3442   _virtual_space.shrink_by(bytes);
  3443   // Shrink space; this also shrinks the space's BOT
  3444   _cmsSpace->set_end((HeapWord*) _virtual_space.high());
  3445   size_t new_word_size = heap_word_size(_cmsSpace->capacity());
  3446   // Shrink the shared block offset array
  3447   _bts->resize(new_word_size);
  3448   MemRegion mr(_cmsSpace->bottom(), new_word_size);
  3449   // Shrink the card table
  3450   Universe::heap()->barrier_set()->resize_covered_region(mr);
  3452   if (Verbose && PrintGC) {
  3453     size_t new_mem_size = _virtual_space.committed_size();
  3454     size_t old_mem_size = new_mem_size + bytes;
  3455     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
  3456                   name(), old_mem_size/K, new_mem_size/K);
  3460 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  3461   assert_locked_or_safepoint(Heap_lock);
  3462   size_t size = ReservedSpace::page_align_size_down(bytes);
  3463   // Only shrink if a compaction was done so that all the free space
  3464   // in the generation is in a contiguous block at the end.
  3465   if (size > 0 && did_compact()) {
  3466     shrink_by(size);
  3470 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
  3471   assert_locked_or_safepoint(Heap_lock);
  3472   bool result = _virtual_space.expand_by(bytes);
  3473   if (result) {
  3474     size_t new_word_size =
  3475       heap_word_size(_virtual_space.committed_size());
  3476     MemRegion mr(_cmsSpace->bottom(), new_word_size);
  3477     _bts->resize(new_word_size);  // resize the block offset shared array
  3478     Universe::heap()->barrier_set()->resize_covered_region(mr);
  3479     // Hmmmm... why doesn't CFLS::set_end verify locking?
  3480     // This is quite ugly; FIX ME XXX
  3481     _cmsSpace->assert_locked(freelistLock());
  3482     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
  3484     // update the space and generation capacity counters
  3485     if (UsePerfData) {
  3486       _space_counters->update_capacity();
  3487       _gen_counters->update_all();
  3490     if (Verbose && PrintGC) {
  3491       size_t new_mem_size = _virtual_space.committed_size();
  3492       size_t old_mem_size = new_mem_size - bytes;
  3493       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
  3494                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
  3497   return result;
  3500 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
  3501   assert_locked_or_safepoint(Heap_lock);
  3502   bool success = true;
  3503   const size_t remaining_bytes = _virtual_space.uncommitted_size();
  3504   if (remaining_bytes > 0) {
  3505     success = grow_by(remaining_bytes);
  3506     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  3508   return success;
  3511 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  3512   assert_locked_or_safepoint(Heap_lock);
  3513   assert_lock_strong(freelistLock());
  3514   if (PrintGCDetails && Verbose) {
  3515     warning("Shrinking of CMS not yet implemented");
  3517   return;
  3521 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
  3522 // phases.
  3523 class CMSPhaseAccounting: public StackObj {
  3524  public:
  3525   CMSPhaseAccounting(CMSCollector *collector,
  3526                      const char *phase,
  3527                      bool print_cr = true);
  3528   ~CMSPhaseAccounting();
  3530  private:
  3531   CMSCollector *_collector;
  3532   const char *_phase;
  3533   elapsedTimer _wallclock;
  3534   bool _print_cr;
  3536  public:
  3537   // Not MT-safe; so do not pass around these StackObj's
  3538   // where they may be accessed by other threads.
  3539   jlong wallclock_millis() {
  3540     assert(_wallclock.is_active(), "Wall clock should not stop");
  3541     _wallclock.stop();  // to record time
  3542     jlong ret = _wallclock.milliseconds();
  3543     _wallclock.start(); // restart
  3544     return ret;
  3546 };
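
// Illustrative sketch (not compiled): CMSPhaseAccounting is used as a stack
// object bracketing each concurrent phase; the constructor logs the "-start"
// line and starts the timers, the destructor stops them and logs the
// CPU/wall times. In effect (cf. markFromRoots() below):
#if 0
{
  CMSTokenSyncWithLocks ts(true, bitMapLock());
  CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
  markFromRootsWork(asynch);   // the timed phase body
} // ~CMSPhaseAccounting() prints "[...-concurrent-mark: cpu/wall secs]"
#endif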
  3548 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
  3549                                        const char *phase,
  3550                                        bool print_cr) :
  3551   _collector(collector), _phase(phase), _print_cr(print_cr) {
  3553   if (PrintCMSStatistics != 0) {
  3554     _collector->resetYields();
  3556   if (PrintGCDetails) {
  3557     gclog_or_tty->date_stamp(PrintGCDateStamps);
  3558     gclog_or_tty->stamp(PrintGCTimeStamps);
  3559     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
  3560       _collector->cmsGen()->short_name(), _phase);
  3562   _collector->resetTimer();
  3563   _wallclock.start();
  3564   _collector->startTimer();
  3567 CMSPhaseAccounting::~CMSPhaseAccounting() {
  3568   assert(_wallclock.is_active(), "Wall clock should not have stopped");
  3569   _collector->stopTimer();
  3570   _wallclock.stop();
  3571   if (PrintGCDetails) {
  3572     gclog_or_tty->date_stamp(PrintGCDateStamps);
  3573     gclog_or_tty->stamp(PrintGCTimeStamps);
  3574     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
  3575                  _collector->cmsGen()->short_name(),
  3576                  _phase, _collector->timerValue(), _wallclock.seconds());
  3577     if (_print_cr) {
  3578       gclog_or_tty->print_cr("");
  3580     if (PrintCMSStatistics != 0) {
  3581       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
  3582                     _collector->yields());
  3587 // CMS work
  3589 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
  3590 class CMSParMarkTask : public AbstractGangTask {
  3591  protected:
  3592   CMSCollector*     _collector;
  3593   int               _n_workers;
  3594   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
  3595       AbstractGangTask(name),
  3596       _collector(collector),
  3597       _n_workers(n_workers) {}
  3598   // Work method in support of parallel rescan ... of young gen spaces
  3599   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
  3600                              ContiguousSpace* space,
  3601                              HeapWord** chunk_array, size_t chunk_top);
  3602   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
  3603 };
  3605 // Parallel initial mark task
  3606 class CMSParInitialMarkTask: public CMSParMarkTask {
  3607  public:
  3608   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
  3609       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
  3610                      collector, n_workers) {}
  3611   void work(uint worker_id);
  3612 };
  3614 // Checkpoint the roots into this generation from outside
  3615 // this generation. [Note this initial checkpoint need only
  3616 // be approximate -- we'll do a catch up phase subsequently.]
  3617 void CMSCollector::checkpointRootsInitial(bool asynch) {
  3618   assert(_collectorState == InitialMarking, "Wrong collector state");
  3619   check_correct_thread_executing();
  3620   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
  3622   save_heap_summary();
  3623   report_heap_summary(GCWhen::BeforeGC);
  3625   ReferenceProcessor* rp = ref_processor();
  3626   SpecializationStats::clear();
  3627   assert(_restart_addr == NULL, "Control point invariant");
  3628   if (asynch) {
  3629     // acquire locks for subsequent manipulations
  3630     MutexLockerEx x(bitMapLock(),
  3631                     Mutex::_no_safepoint_check_flag);
  3632     checkpointRootsInitialWork(asynch);
  3633     // enable ("weak") refs discovery
  3634     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
  3635     _collectorState = Marking;
  3636   } else {
  3637     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection
  3638     // which recognizes if we are a CMS generation, and doesn't try to turn on
  3639     // discovery; verify that they aren't meddling.
  3640     assert(!rp->discovery_is_atomic(),
  3641            "incorrect setting of discovery predicate");
  3642     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
  3643            "ref discovery for this generation kind");
  3644     // already have locks
  3645     checkpointRootsInitialWork(asynch);
  3646     // now enable ("weak") refs discovery
  3647     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
  3648     _collectorState = Marking;
  3650   SpecializationStats::print();
  3653 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
  3654   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  3655   assert(_collectorState == InitialMarking, "just checking");
  3657   // If there has not been a GC[n-1] since last GC[n] cycle completed,
  3658   // precede our marking with a collection of all
  3659   // younger generations to keep floating garbage to a minimum.
  3660   // XXX: we won't do this for now -- it's an optimization to be done later.
  3662   // already have locks
  3663   assert_lock_strong(bitMapLock());
  3664   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
  3666   // Setup the verification and class unloading state for this
  3667   // CMS collection cycle.
  3668   setup_cms_unloading_and_verification_state();
  3670   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
  3671     PrintGCDetails && Verbose, true, _gc_timer_cm);)
  3672   if (UseAdaptiveSizePolicy) {
  3673     size_policy()->checkpoint_roots_initial_begin();
  3676   // Reset all the PLAB chunk arrays if necessary.
  3677   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
  3678     reset_survivor_plab_arrays();
  3681   ResourceMark rm;
  3682   HandleMark  hm;
  3684   FalseClosure falseClosure;
  3685   // In the case of a synchronous collection, we will elide the
  3686   // remark step, so it's important to catch all the nmethod oops
  3687   // in this step.
  3688   // The final 'true' flag to gen_process_strong_roots will ensure this.
  3689   // If 'asynch' is true, we can relax the nmethod tracing.
  3690   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  3691   GenCollectedHeap* gch = GenCollectedHeap::heap();
  3693   verify_work_stacks_empty();
  3694   verify_overflow_empty();
  3696   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  3697   // Update the saved marks which may affect the root scans.
  3698   gch->save_marks();
  3700   // weak reference processing has not started yet.
  3701   ref_processor()->set_enqueuing_is_done(false);
  3703   // Need to remember all newly created CLDs,
  3704   // so that we can guarantee that the remark finds them.
  3705   ClassLoaderDataGraph::remember_new_clds(true);
  3707   // Whenever a CLD is found, it will be claimed before proceeding to mark
  3708   // the klasses. The claimed marks need to be cleared before marking starts.
  3709   ClassLoaderDataGraph::clear_claimed_marks();
  3711   if (CMSPrintEdenSurvivorChunks) {
  3712     print_eden_and_survivor_chunk_arrays();
  3716     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  3717     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
  3718       // The parallel version.
  3719       FlexibleWorkGang* workers = gch->workers();
  3720       assert(workers != NULL, "Need parallel worker threads.");
  3721       int n_workers = workers->active_workers();
  3722       CMSParInitialMarkTask tsk(this, n_workers);
  3723       gch->set_par_threads(n_workers);
  3724       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
  3725       if (n_workers > 1) {
  3726         GenCollectedHeap::StrongRootsScope srs(gch);
  3727         workers->run_task(&tsk);
  3728       } else {
  3729         GenCollectedHeap::StrongRootsScope srs(gch);
  3730         tsk.work(0);
  3732       gch->set_par_threads(0);
  3733     } else {
  3734       // The serial version.
  3735       CMKlassClosure klass_closure(&notOlder);
  3736       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  3737       gch->gen_process_strong_roots(_cmsGen->level(),
  3738                                     true,   // younger gens are roots
  3739                                     true,   // activate StrongRootsScope
  3740                                     false,  // not scavenging
  3741                                     SharedHeap::ScanningOption(roots_scanning_options()),
  3742                                     &notOlder,
  3743                                     true,   // walk all of code cache if (so & SO_CodeCache)
  3744                                     NULL,
  3745                                     &klass_closure);
  3749   // Clear mod-union table; it will be dirtied in the CMS generation's
  3750   // prologue on each younger generation collection.
  3752   assert(_modUnionTable.isAllClear(),
  3753        "Was cleared in most recent final checkpoint phase"
  3754        " or no bits are set in the gc_prologue before the start of the "
  3755        "next marking phase.");
  3757   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
  3759   // Save the end of the used_region of the constituent generations
  3760   // to be used to limit the extent of sweep in each generation.
  3761   save_sweep_limits();
  3762   if (UseAdaptiveSizePolicy) {
  3763     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
  3765   verify_overflow_empty();
  3768 bool CMSCollector::markFromRoots(bool asynch) {
  3769   // we might be tempted to assert that:
  3770   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  3771   //        "inconsistent argument?");
  3772   // However that wouldn't be right, because it's possible that
  3773   // a safepoint is indeed in progress as a younger generation
  3774   // stop-the-world GC happens even as we mark in this generation.
  3775   assert(_collectorState == Marking, "inconsistent state?");
  3776   check_correct_thread_executing();
  3777   verify_overflow_empty();
  3779   bool res;
  3780   if (asynch) {
  3782     // Start the timers for adaptive size policy for the concurrent phases
  3783     // Do it here so that the foreground MS can use the concurrent
  3784     // timer since a foreground MS might have the sweep done concurrently
  3785     // or STW.
  3786     if (UseAdaptiveSizePolicy) {
  3787       size_policy()->concurrent_marking_begin();
  3790     // Weak ref discovery note: We may be discovering weak
  3791     // refs in this generation concurrent (but interleaved) with
  3792     // weak ref discovery by a younger generation collector.
  3794     CMSTokenSyncWithLocks ts(true, bitMapLock());
  3795     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3796     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
  3797     res = markFromRootsWork(asynch);
  3798     if (res) {
  3799       _collectorState = Precleaning;
  3800     } else { // We failed and a foreground collection wants to take over
  3801       assert(_foregroundGCIsActive, "internal state inconsistency");
  3802       assert(_restart_addr == NULL,  "foreground will restart from scratch");
  3803       if (PrintGCDetails) {
  3804         gclog_or_tty->print_cr("bailing out to foreground collection");
  3807     if (UseAdaptiveSizePolicy) {
  3808       size_policy()->concurrent_marking_end();
  3810   } else {
  3811     assert(SafepointSynchronize::is_at_safepoint(),
  3812            "inconsistent with asynch == false");
  3813     if (UseAdaptiveSizePolicy) {
  3814       size_policy()->ms_collection_marking_begin();
  3816     // already have locks
  3817     res = markFromRootsWork(asynch);
  3818     _collectorState = FinalMarking;
  3819     if (UseAdaptiveSizePolicy) {
  3820       GenCollectedHeap* gch = GenCollectedHeap::heap();
  3821       size_policy()->ms_collection_marking_end(gch->gc_cause());
  3824   verify_overflow_empty();
  3825   return res;
  3828 bool CMSCollector::markFromRootsWork(bool asynch) {
  3829   // iterate over marked bits in bit map, doing a full scan and mark
  3830   // from these roots using the following algorithm:
  3831   // . if oop is to the right of the current scan pointer,
  3832   //   mark corresponding bit (we'll process it later)
  3833   // . else (oop is to left of current scan pointer)
  3834   //   push oop on marking stack
  3835   // . drain the marking stack
  3837   // Note that when we do a marking step we need to hold the
  3838   // bit map lock -- recall that direct allocation (by mutators)
  3839   // and promotion (by younger generation collectors) is also
  3840   // marking the bit map. [the so-called allocate live policy.]
  3841   // Because the implementation of bit map marking is not
  3842   // robust wrt simultaneous marking of bits in the same word,
  3843   // we need to make sure that there is no interference
  3844   // between such concurrent updates.
  3846   // already have locks
  3847   assert_lock_strong(bitMapLock());
  3849   verify_work_stacks_empty();
  3850   verify_overflow_empty();
  3851   bool result = false;
  3852   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
  3853     result = do_marking_mt(asynch);
  3854   } else {
  3855     result = do_marking_st(asynch);
  3857   return result;
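// A minimal standalone sketch of the scan-pointer discipline described
// above, assuming a toy bitmap over object indices and an explicit mark
// stack (all names hypothetical, not HotSpot types): objects at or beyond
// the finger are only marked in the bitmap and reached later by the scan;
// objects behind the finger are pushed and drained eagerly.
#include <cstddef>
#include <stack>
#include <vector>

struct MarkSketch {
  std::vector<bool>  bits;     // one mark bit per object slot
  std::stack<size_t> greys;    // grey objects discovered behind the finger
  size_t             finger;   // current bitmap scan position

  void mark(size_t idx) {
    if (bits[idx]) return;     // already grey or black
    bits[idx] = true;
    if (idx < finger) {
      greys.push(idx);         // behind the scan pointer: process eagerly
    }                          // else: the ongoing bitmap scan will reach it
  }

  void drain() {
    while (!greys.empty()) {   // trace until no grey objects remain
      size_t obj = greys.top();
      greys.pop();
      (void)obj;               // ... scan obj's fields, calling mark() ...
    }
  }
};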
  3860 // Forward decl
  3861 class CMSConcMarkingTask;
  3863 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  3864   CMSCollector*       _collector;
  3865   CMSConcMarkingTask* _task;
  3866  public:
  3867   virtual void yield();
  3869   // "n_threads" is the number of threads to be terminated.
  3870   // "queue_set" is a set of work queues of other threads.
  3871   // "collector" is the CMS collector associated with this task terminator.
  3872   // "yield" indicates whether we need the gang as a whole to yield.
  3873   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
  3874     ParallelTaskTerminator(n_threads, queue_set),
  3875     _collector(collector) { }
  3877   void set_task(CMSConcMarkingTask* task) {
  3878     _task = task;
  3880 };
  3882 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
  3883   CMSConcMarkingTask* _task;
  3884  public:
  3885   bool should_exit_termination();
  3886   void set_task(CMSConcMarkingTask* task) {
  3887     _task = task;
  3889 };
  3891 // MT Concurrent Marking Task
  3892 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  3893   CMSCollector* _collector;
  3894   int           _n_workers;                  // requested/desired # workers
  3895   bool          _asynch;
  3896   bool          _result;
  3897   CompactibleFreeListSpace*  _cms_space;
  3898   char          _pad_front[64];   // padding to ...
  3899   HeapWord*     _global_finger;   // ... avoid sharing cache line
  3900   char          _pad_back[64];
  3901   HeapWord*     _restart_addr;
  3903   //  Exposed here for yielding support
  3904   Mutex* const _bit_map_lock;
  3906   // The per thread work queues, available here for stealing
  3907   OopTaskQueueSet*  _task_queues;
  3909   // Termination (and yielding) support
  3910   CMSConcMarkingTerminator _term;
  3911   CMSConcMarkingTerminatorTerminator _term_term;
  3913  public:
  3914   CMSConcMarkingTask(CMSCollector* collector,
  3915                  CompactibleFreeListSpace* cms_space,
  3916                  bool asynch,
  3917                  YieldingFlexibleWorkGang* workers,
  3918                  OopTaskQueueSet* task_queues):
  3919     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
  3920     _collector(collector),
  3921     _cms_space(cms_space),
  3922     _asynch(asynch), _n_workers(0), _result(true),
  3923     _task_queues(task_queues),
  3924     _term(_n_workers, task_queues, _collector),
  3925     _bit_map_lock(collector->bitMapLock())
  3927     _requested_size = _n_workers;
  3928     _term.set_task(this);
  3929     _term_term.set_task(this);
  3930     _restart_addr = _global_finger = _cms_space->bottom();
  3934   OopTaskQueueSet* task_queues()  { return _task_queues; }
  3936   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
  3938   HeapWord** global_finger_addr() { return &_global_finger; }
  3940   CMSConcMarkingTerminator* terminator() { return &_term; }
  3942   virtual void set_for_termination(int active_workers) {
  3943     terminator()->reset_for_reuse(active_workers);
  3946   void work(uint worker_id);
  3947   bool should_yield() {
  3948     return    ConcurrentMarkSweepThread::should_yield()
  3949            && !_collector->foregroundGCIsActive()
  3950            && _asynch;
  3953   virtual void coordinator_yield();  // stuff done by coordinator
  3954   bool result() { return _result; }
  3956   void reset(HeapWord* ra) {
  3957     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
  3958     _restart_addr = _global_finger = ra;
  3959     _term.reset_for_reuse();
  3962   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
  3963                                            OopTaskQueue* work_q);
  3965  private:
  3966   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
  3967   void do_work_steal(int i);
  3968   void bump_global_finger(HeapWord* f);
  3969 };
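// The _pad_front/_pad_back arrays above keep _global_finger on its own
// cache line, since it is CAS-updated by every worker. A standalone sketch
// of the same idea with C++11 alignas (64 is an assumed cache-line size):
struct alignas(64) PaddedFinger {
  void* finger;                   // hot, CAS-updated word
  char  pad[64 - sizeof(void*)];  // keep neighbours off this cache line
};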
  3971 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
  3972   assert(_task != NULL, "Error");
  3973   return _task->yielding();
  3974   // Note that we do not need the disjunct || _task->should_yield() above
  3975   // because we want terminating threads to yield only if the task
  3976   // is already in the midst of yielding, which happens only after at least one
  3977   // thread has yielded.
  3980 void CMSConcMarkingTerminator::yield() {
  3981   if (_task->should_yield()) {
  3982     _task->yield();
  3983   } else {
  3984     ParallelTaskTerminator::yield();
  3988 ////////////////////////////////////////////////////////////////
  3989 // Concurrent Marking Algorithm Sketch
  3990 ////////////////////////////////////////////////////////////////
  3991 // Until all tasks exhausted (both spaces):
  3992 // -- claim next available chunk
  3993 // -- bump global finger via CAS
  3994 // -- find first object that starts in this chunk
  3995 //    and start scanning bitmap from that position
  3996 // -- scan marked objects for oops
  3997 // -- CAS-mark target, and if successful:
  3998 //    . if target oop is above global finger (volatile read)
  3999 //      nothing to do
  4000 //    . if target oop is in chunk and above local finger
  4001 //        then nothing to do
  4002 //    . else push on work-queue
  4003 // -- Deal with possible overflow issues:
  4004 //    . local work-queue overflow causes stuff to be pushed on
  4005 //      global (common) overflow queue
  4006 //    . always first empty local work queue
  4007 //    . then get a batch of oops from global work queue if any
  4008 //    . then do work stealing
  4009 // -- When all tasks claimed (both spaces)
  4010 //    and local work queue empty,
  4011 //    then in a loop do:
  4012 //    . check global overflow stack; steal a batch of oops and trace
  4013 //    . try to steal from other threads if GOS is empty
  4014 //    . if neither is available, offer termination
  4015 // -- Terminate and return result
  4016 //
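// A standalone sketch of the task-claiming side of the algorithm above
// (hypothetical names, not HotSpot code): workers atomically claim chunk
// indices off a shared counter; everything else -- the finger bump, the
// bitmap scan, overflow handling -- hangs off each claimed chunk.
#include <atomic>
#include <cstddef>

static std::atomic<size_t> next_chunk{0};      // shared task counter

void claim_loop(size_t n_chunks) {
  for (;;) {
    size_t n = next_chunk.fetch_add(1);        // claim next available chunk
    if (n >= n_chunks) break;                  // all tasks claimed
    // ... bump the global finger past chunk n (CAS), then scan marked
    // objects whose start lies in chunk n ...
  }
}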
  4017 void CMSConcMarkingTask::work(uint worker_id) {
  4018   elapsedTimer _timer;
  4019   ResourceMark rm;
  4020   HandleMark hm;
  4022   DEBUG_ONLY(_collector->verify_overflow_empty();)
  4024   // Before we begin work, our work queue should be empty
  4025   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
  4026   // Scan the bitmap covering _cms_space, tracing through grey objects.
  4027   _timer.start();
  4028   do_scan_and_mark(worker_id, _cms_space);
  4029   _timer.stop();
  4030   if (PrintCMSStatistics != 0) {
  4031     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
  4032       worker_id, _timer.seconds());
  4033       // XXX: need xxx/xxx type of notation, two timers
  4036   // ... do work stealing
  4037   _timer.reset();
  4038   _timer.start();
  4039   do_work_steal(worker_id);
  4040   _timer.stop();
  4041   if (PrintCMSStatistics != 0) {
  4042     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
  4043       worker_id, _timer.seconds());
  4044       // XXX: need xxx/xxx type of notation, two timers
  4046   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
  4047   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
  4048   // Note that under the current task protocol, the
  4049   // following assertion is true even if the spaces
  4050   // expanded since the completion of the concurrent
  4051   // marking. XXX This will likely change under a strict
  4052   // ABORT semantics.
  4053   // After perm removal the comparison was changed to
  4054   // greater than or equal to from strictly greater than.
  4055   // Before perm removal the highest address sweep would
  4056   // have been at the end of perm gen but now is at the
  4057   // end of the tenured gen.
  4058   assert(_global_finger >=  _cms_space->end(),
  4059          "All tasks have been completed");
  4060   DEBUG_ONLY(_collector->verify_overflow_empty();)
  4063 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
  4064   HeapWord* read = _global_finger;
  4065   HeapWord* cur  = read;
  4066   while (f > read) {
  4067     cur = read;
  4068     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
  4069     if (cur == read) {
  4070       // our cas succeeded
  4071       assert(_global_finger >= f, "protocol consistency");
  4072       break;
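// Portable C++11 rendering of the same monotonic-max idiom as
// bump_global_finger() above (standalone sketch, not HotSpot code):
#include <atomic>
#include <cstdint>

static std::atomic<std::uintptr_t> g_finger{0};

void bump_finger_sketch(std::uintptr_t f) {
  std::uintptr_t read = g_finger.load();
  while (f > read) {                              // only ever move forward
    // On failure, compare_exchange_weak refreshes 'read' and we re-test;
    // on success the finger is now at least f and we are done.
    if (g_finger.compare_exchange_weak(read, f)) {
      break;
    }
  }
}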
  4077 // This is really inefficient, and should be redone by
  4078 // using (not yet available) block-read and -write interfaces to the
  4079 // stack and the work_queue. XXX FIX ME !!!
  4080 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
  4081                                                       OopTaskQueue* work_q) {
  4082   // Fast lock-free check
  4083   if (ovflw_stk->length() == 0) {
  4084     return false;
  4086   assert(work_q->size() == 0, "Shouldn't steal");
  4087   MutexLockerEx ml(ovflw_stk->par_lock(),
  4088                    Mutex::_no_safepoint_check_flag);
  4089   // Grab up to 1/4 the size of the work queue
  4090   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
  4091                     (size_t)ParGCDesiredObjsFromOverflowList);
  4092   num = MIN2(num, ovflw_stk->length());
  4093   for (int i = (int) num; i > 0; i--) {
  4094     oop cur = ovflw_stk->pop();
  4095     assert(cur != NULL, "Counted wrong?");
  4096     work_q->push(cur);
  4098   return num > 0;
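// Worked example of the batch sizing above, assuming a queue capacity
// (max_elems) of 16384, an empty queue, and ParGCDesiredObjsFromOverflowList
// at an illustrative value of 20:
//   num = MIN2((16384 - 0)/4, 20) = 20, then capped by the stack length,
// so at most 20 oops are popped from the overflow stack per refill.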
  4101 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
  4102   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  4103   int n_tasks = pst->n_tasks();
  4104   // We allow that there may be no tasks to do here because
  4105   // we are restarting after a stack overflow.
  4106   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
  4107   uint nth_task = 0;
  4109   HeapWord* aligned_start = sp->bottom();
  4110   if (sp->used_region().contains(_restart_addr)) {
  4111     // Align down to a card boundary for the start of 0th task
  4112     // for this space.
  4113     aligned_start =
  4114       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
  4115                                  CardTableModRefBS::card_size);
  4118   size_t chunk_size = sp->marking_task_size();
  4119   while (!pst->is_task_claimed(/* reference */ nth_task)) {
  4120     // Having claimed the nth task in this space,
  4121     // compute the chunk that it corresponds to:
  4122     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
  4123                                aligned_start + (nth_task+1)*chunk_size);
  4124     // Try and bump the global finger via a CAS;
  4125     // note that we need to do the global finger bump
  4126     // _before_ taking the intersection below, because
  4127     // the task corresponding to that region will be
  4128     // deemed done even if the used_region() expands
  4129     // because of allocation -- as it almost certainly will
  4130     // during start-up while the threads yield in the
  4131     // closure below.
  4132     HeapWord* finger = span.end();
  4133     bump_global_finger(finger);   // atomically
  4134     // There are null tasks here corresponding to chunks
  4135     // beyond the "top" address of the space.
  4136     span = span.intersection(sp->used_region());
  4137     if (!span.is_empty()) {  // Non-null task
  4138       HeapWord* prev_obj;
  4139       assert(!span.contains(_restart_addr) || nth_task == 0,
  4140              "Inconsistency");
  4141       if (nth_task == 0) {
  4142         // For the 0th task, we'll not need to compute a block_start.
  4143         if (span.contains(_restart_addr)) {
  4144           // In the case of a restart because of stack overflow,
  4145           // we might additionally skip a chunk prefix.
  4146           prev_obj = _restart_addr;
  4147         } else {
  4148           prev_obj = span.start();
  4150       } else {
  4151         // We want to skip the first object because
  4152         // the protocol is to scan any object in its entirety
  4153         // that _starts_ in this span; a fortiori, any
  4154         // object starting in an earlier span is scanned
  4155         // as part of an earlier claimed task.
  4156         // Below we use the "careful" version of block_start
  4157         // so we do not try to navigate uninitialized objects.
  4158         prev_obj = sp->block_start_careful(span.start());
  4159         // Below we use a variant of block_size that uses the
  4160         // Printezis bits to avoid waiting for allocated
  4161         // objects to become initialized/parsable.
  4162         while (prev_obj < span.start()) {
  4163           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
  4164           if (sz > 0) {
  4165             prev_obj += sz;
  4166           } else {
  4167             // In this case we may end up doing a bit of redundant
  4168             // scanning, but that appears unavoidable, short of
  4169             // locking the free list locks; see bug 6324141.
  4170             break;
  4174       if (prev_obj < span.end()) {
  4175         MemRegion my_span = MemRegion(prev_obj, span.end());
  4176         // Do the marking work within a non-empty span --
  4177         // the last argument to the constructor indicates whether the
  4178         // iteration should be incremental with periodic yields.
  4179         Par_MarkFromRootsClosure cl(this, _collector, my_span,
  4180                                     &_collector->_markBitMap,
  4181                                     work_queue(i),
  4182                                     &_collector->_markStack,
  4183                                     _asynch);
  4184         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
  4185       } // else nothing to do for this task
  4186     }   // else nothing to do for this task
  4188   // We'd be tempted to assert here that since there are no
  4189   // more tasks left to claim in this space, the global_finger
  4190   // must exceed space->top() and a fortiori space->end(). However,
  4191   // that would not quite be correct because the bumping of
  4192   // global_finger occurs strictly after the claiming of a task,
  4193   // so by the time we reach here the global finger may not yet
  4194   // have been bumped up by the thread that claimed the last
  4195   // task.
  4196   pst->all_tasks_completed();
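// The card alignment of _restart_addr above is plain power-of-two
// arithmetic; a standalone sketch (512 is an assumed card size -- the real
// value comes from CardTableModRefBS::card_size):
#include <cstdint>

inline std::uintptr_t align_down(std::uintptr_t p, std::uintptr_t align) {
  return p & ~(align - 1);       // align must be a power of two
}
// e.g. align_down(0x1234, 512) == 0x1200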
  4199 class Par_ConcMarkingClosure: public CMSOopClosure {
  4200  private:
  4201   CMSCollector* _collector;
  4202   CMSConcMarkingTask* _task;
  4203   MemRegion     _span;
  4204   CMSBitMap*    _bit_map;
  4205   CMSMarkStack* _overflow_stack;
  4206   OopTaskQueue* _work_queue;
  4207  protected:
  4208   DO_OOP_WORK_DEFN
  4209  public:
  4210   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
  4211                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
  4212     CMSOopClosure(collector->ref_processor()),
  4213     _collector(collector),
  4214     _task(task),
  4215     _span(collector->_span),
  4216     _work_queue(work_queue),
  4217     _bit_map(bit_map),
  4218     _overflow_stack(overflow_stack)
  4219   { }
  4220   virtual void do_oop(oop* p);
  4221   virtual void do_oop(narrowOop* p);
  4223   void trim_queue(size_t max);
  4224   void handle_stack_overflow(HeapWord* lost);
  4225   void do_yield_check() {
  4226     if (_task->should_yield()) {
  4227       _task->yield();
  4230 };
  4232 // Grey object scanning during work stealing phase --
  4233 // the salient assumption here is that any references
  4234 // that are in these stolen objects being scanned must
  4235 // already have been initialized (else they would not have
  4236 // been published), so we do not need to check for
  4237 // uninitialized objects before pushing here.
  4238 void Par_ConcMarkingClosure::do_oop(oop obj) {
  4239   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  4240   HeapWord* addr = (HeapWord*)obj;
  4241   // Check if oop points into the CMS generation
  4242   // and is not marked
  4243   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
  4244     // a white object ...
  4245     // If we manage to "claim" the object, by being the
  4246     // first thread to mark it, then we push it on our
  4247     // marking stack
  4248     if (_bit_map->par_mark(addr)) {     // ... now grey
  4249       // push on work queue (grey set)
  4250       bool simulate_overflow = false;
  4251       NOT_PRODUCT(
  4252         if (CMSMarkStackOverflowALot &&
  4253             _collector->simulate_overflow()) {
  4254           // simulate a stack overflow
  4255           simulate_overflow = true;
  4258       if (simulate_overflow ||
  4259           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
  4260         // stack overflow
  4261         if (PrintCMSStatistics != 0) {
  4262           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  4263                                  SIZE_FORMAT, _overflow_stack->capacity());
  4265         // We cannot assert that the overflow stack is full because
  4266         // it may have been emptied since.
  4267         assert(simulate_overflow ||
  4268                _work_queue->size() == _work_queue->max_elems(),
  4269               "Else push should have succeeded");
  4270         handle_stack_overflow(addr);
  4272     } // Else, some other thread got there first
  4273     do_yield_check();
  4277 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
  4278 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
  4280 void Par_ConcMarkingClosure::trim_queue(size_t max) {
  4281   while (_work_queue->size() > max) {
  4282     oop new_oop;
  4283     if (_work_queue->pop_local(new_oop)) {
  4284       assert(new_oop->is_oop(), "Should be an oop");
  4285       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
  4286       assert(_span.contains((HeapWord*)new_oop), "Not in span");
  4287       new_oop->oop_iterate(this);  // do_oop() above
  4288       do_yield_check();
  4293 // Upon stack overflow, we discard (part of) the stack,
  4294 // remembering the least address amongst those discarded
  4295 // in CMSCollector's _restart_address.
  4296 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
  4297   // We need to do this under a mutex to prevent other
  4298   // workers from interfering with the work done below.
  4299   MutexLockerEx ml(_overflow_stack->par_lock(),
  4300                    Mutex::_no_safepoint_check_flag);
  4301   // Remember the least grey address discarded
  4302   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  4303   _collector->lower_restart_addr(ra);
  4304   _overflow_stack->reset();  // discard stack contents
  4305   _overflow_stack->expand(); // expand the stack if possible
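// The collector keeps the least of all discarded addresses so that the
// fresh marking iteration restarts low enough. A sketch of that contract
// (hypothetical shape; the real lower_restart_addr() lives elsewhere in
// this file):
void sketch_lower_restart_addr(HeapWord* volatile& restart, HeapWord* low) {
  if (restart == NULL || low < restart) {
    restart = low;   // callers serialize via the overflow stack's par_lock
  }
}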
  4309 void CMSConcMarkingTask::do_work_steal(int i) {
  4310   OopTaskQueue* work_q = work_queue(i);
  4311   oop obj_to_scan;
  4312   CMSBitMap* bm = &(_collector->_markBitMap);
  4313   CMSMarkStack* ovflw = &(_collector->_markStack);
  4314   int* seed = _collector->hash_seed(i);
  4315   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
  4316   while (true) {
  4317     cl.trim_queue(0);
  4318     assert(work_q->size() == 0, "Should have been emptied above");
  4319     if (get_work_from_overflow_stack(ovflw, work_q)) {
  4320       // Can't assert below because the work obtained from the
  4321       // overflow stack may already have been stolen from us.
  4322       // assert(work_q->size() > 0, "Work from overflow stack");
  4323       continue;
  4324     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
  4325       assert(obj_to_scan->is_oop(), "Should be an oop");
  4326       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
  4327       obj_to_scan->oop_iterate(&cl);
  4328     } else if (terminator()->offer_termination(&_term_term)) {
  4329       assert(work_q->size() == 0, "Impossible!");
  4330       break;
  4331     } else if (yielding() || should_yield()) {
  4332       yield();
  4337 // This is run by the CMS (coordinator) thread.
  4338 void CMSConcMarkingTask::coordinator_yield() {
  4339   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  4340          "CMS thread should hold CMS token");
  4341   // First give up the locks, then yield, then re-lock
  4342   // We should probably use a constructor/destructor idiom to
  4343   // do this unlock/lock or modify the MutexUnlocker class to
  4344   // serve our purpose. XXX
  4345   assert_lock_strong(_bit_map_lock);
  4346   _bit_map_lock->unlock();
  4347   ConcurrentMarkSweepThread::desynchronize(true);
  4348   ConcurrentMarkSweepThread::acknowledge_yield_request();
  4349   _collector->stopTimer();
  4350   if (PrintCMSStatistics != 0) {
  4351     _collector->incrementYields();
  4353   _collector->icms_wait();
  4355   // It is possible for whichever thread initiated the yield request
  4356   // not to get a chance to wake up and take the bitmap lock between
  4357   // this thread releasing it and reacquiring it. So, while the
  4358   // should_yield() flag is on, let's sleep for a bit to give the
  4359   // other thread a chance to wake up. The limit imposed on the number
  4360   // of iterations is defensive, to avoid any unforeseen circumstances
  4361   // putting us into an infinite loop. Since it's always been this
  4362   // (coordinator_yield()) method that was observed to cause the
  4363   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
  4364   // which is by default non-zero. The other seven methods that
  4365   // also perform the yield operation use a different
  4366   // parameter (CMSYieldSleepCount) which is by default zero. This way we
  4367   // can enable the sleeping for those methods too, if necessary.
  4368   // See 6442774.
  4369   //
  4370   // We really need to reconsider the synchronization between the GC
  4371   // thread and the yield-requesting threads in the future and we
  4372   // should really use wait/notify, which is the recommended
  4373   // way of doing this type of interaction. Additionally, we should
  4374   // consolidate the eight methods that do the yield operation, which
  4375   // are almost identical, into one for better maintainability and
  4376   // readability. See 6445193.
  4377   //
  4378   // Tony 2006.06.29
  4379   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
  4380                    ConcurrentMarkSweepThread::should_yield() &&
  4381                    !CMSCollector::foregroundGCIsActive(); ++i) {
  4382     os::sleep(Thread::current(), 1, false);
  4383     ConcurrentMarkSweepThread::acknowledge_yield_request();
  4386   ConcurrentMarkSweepThread::synchronize(true);
  4387   _bit_map_lock->lock_without_safepoint_check();
  4388   _collector->startTimer();
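// The XXX above suggests a constructor/destructor idiom for the
// unlock/relock pair. A minimal sketch under that assumption (hypothetical
// helper, not an existing HotSpot class):
class ScopedBitMapUnlock {
  Mutex* const _m;
 public:
  explicit ScopedBitMapUnlock(Mutex* m) : _m(m) { _m->unlock(); }
  ~ScopedBitMapUnlock() { _m->lock_without_safepoint_check(); }
};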
  4391 bool CMSCollector::do_marking_mt(bool asynch) {
  4392   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
  4393   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
  4394                                        conc_workers()->total_workers(),
  4395                                        conc_workers()->active_workers(),
  4396                                        Threads::number_of_non_daemon_threads());
  4397   conc_workers()->set_active_workers(num_workers);
  4399   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  4401   CMSConcMarkingTask tsk(this,
  4402                          cms_space,
  4403                          asynch,
  4404                          conc_workers(),
  4405                          task_queues());
  4407   // Since the actual number of workers we get may be different
  4408   // from the number we requested above, do we need to do anything different
  4409 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
  4410   // class?? XXX
  4411   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
  4413   // Refs discovery is already non-atomic.
  4414   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
  4415   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
  4416   conc_workers()->start_task(&tsk);
  4417   while (tsk.yielded()) {
  4418     tsk.coordinator_yield();
  4419     conc_workers()->continue_task(&tsk);
  4421   // If the task was aborted, _restart_addr will be non-NULL
  4422   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
  4423   while (_restart_addr != NULL) {
  4424     // XXX For now we do not make use of ABORTED state and have not
  4425     // yet implemented the right abort semantics (even in the original
  4426     // single-threaded CMS case). That needs some more investigation
  4427     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
  4428     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
  4429     // If _restart_addr is non-NULL, a marking stack overflow
  4430     // occurred; we need to do a fresh marking iteration from the
  4431     // indicated restart address.
  4432     if (_foregroundGCIsActive && asynch) {
  4433       // We may be running into repeated stack overflows, having
  4434       // reached the limit of the stack size, while making very
  4435       // slow forward progress. It may be best to bail out and
  4436       // let the foreground collector do its job.
  4437       // Clear _restart_addr, so that foreground GC
  4438       // works from scratch. This avoids the headache of
  4439       // a "rescan" which would otherwise be needed because
  4440       // of the dirty mod union table & card table.
  4441       _restart_addr = NULL;
  4442       return false;
  4444     // Adjust the task to restart from _restart_addr
  4445     tsk.reset(_restart_addr);
  4446     cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
  4447                   _restart_addr);
  4448     _restart_addr = NULL;
  4449     // Get the workers going again
  4450     conc_workers()->start_task(&tsk);
  4451     while (tsk.yielded()) {
  4452       tsk.coordinator_yield();
  4453       conc_workers()->continue_task(&tsk);
  4456   assert(tsk.completed(), "Inconsistency");
  4457   assert(tsk.result() == true, "Inconsistency");
  4458   return true;
  4461 bool CMSCollector::do_marking_st(bool asynch) {
  4462   ResourceMark rm;
  4463   HandleMark   hm;
  4465   // Temporarily make refs discovery single threaded (non-MT)
  4466   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
  4467   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
  4468     &_markStack, CMSYield && asynch);
  4469   // the last argument to iterate indicates whether the iteration
  4470   // should be incremental with periodic yields.
  4471   _markBitMap.iterate(&markFromRootsClosure);
  4472   // If _restart_addr is non-NULL, a marking stack overflow
  4473   // occurred; we need to do a fresh iteration from the
  4474   // indicated restart address.
  4475   while (_restart_addr != NULL) {
  4476     if (_foregroundGCIsActive && asynch) {
  4477       // We may be running into repeated stack overflows, having
  4478       // reached the limit of the stack size, while making very
  4479       // slow forward progress. It may be best to bail out and
  4480       // let the foreground collector do its job.
  4481       // Clear _restart_addr, so that foreground GC
  4482       // works from scratch. This avoids the headache of
  4483       // a "rescan" which would otherwise be needed because
  4484       // of the dirty mod union table & card table.
  4485       _restart_addr = NULL;
  4486       return false;  // indicating failure to complete marking
  4488     // Deal with stack overflow:
  4489     // we restart marking from _restart_addr
  4490     HeapWord* ra = _restart_addr;
  4491     markFromRootsClosure.reset(ra);
  4492     _restart_addr = NULL;
  4493     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
  4495   return true;
  4498 void CMSCollector::preclean() {
  4499   check_correct_thread_executing();
  4500   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
  4501   verify_work_stacks_empty();
  4502   verify_overflow_empty();
  4503   _abort_preclean = false;
  4504   if (CMSPrecleaningEnabled) {
  4505     if (!CMSEdenChunksRecordAlways) {
  4506       _eden_chunk_index = 0;
  4508     size_t used = get_eden_used();
  4509     size_t capacity = get_eden_capacity();
  4510     // Don't start sampling unless we will get sufficiently
  4511     // many samples.
  4512     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
  4513                 * CMSScheduleRemarkEdenPenetration)) {
  4514       _start_sampling = true;
  4515     } else {
  4516       _start_sampling = false;
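    // Worked example of the threshold above, assuming the common defaults
    // CMSScheduleRemarkSamplingRatio = 5 and
    // CMSScheduleRemarkEdenPenetration = 50, with an Eden capacity of 100M:
    //   capacity/(5*100) * 50 = 100M/500 * 50 = 10M,
    // i.e. sampling only starts when Eden is still below 10% full, so that
    // enough samples accumulate before the 50% remark target is reached.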
  4518     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  4519     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
  4520     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  4522   CMSTokenSync x(true); // is cms thread
  4523   if (CMSPrecleaningEnabled) {
  4524     sample_eden();
  4525     _collectorState = AbortablePreclean;
  4526   } else {
  4527     _collectorState = FinalMarking;
  4529   verify_work_stacks_empty();
  4530   verify_overflow_empty();
  4533 // Try and schedule the remark such that young gen
  4534 // occupancy is CMSScheduleRemarkEdenPenetration %.
  4535 void CMSCollector::abortable_preclean() {
  4536   check_correct_thread_executing();
  4537   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
  4538   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
  4540   // If Eden's current occupancy is below this threshold,
  4541   // immediately schedule the remark; else preclean
  4542   // past the next scavenge in an effort to
  4543   // schedule the pause as described above. By choosing
  4544   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  4545   // we will never do an actual abortable preclean cycle.
  4546   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
  4547     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  4548     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
  4549     // We need more smarts in the abortable preclean
  4550     // loop below to deal with cases where allocation
  4551     // in young gen is very very slow, and our precleaning
  4552     // is running a losing race against a horde of
  4553     // mutators intent on flooding us with CMS updates
  4554     // (dirty cards).
  4555     // One, admittedly dumb, strategy is to give up
  4556     // after a certain number of abortable precleaning loops
  4557     // or after a certain maximum time. We want to make
  4558     // this smarter in the next iteration.
  4559     // XXX FIX ME!!! YSR
  4560     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
  4561     while (!(should_abort_preclean() ||
  4562              ConcurrentMarkSweepThread::should_terminate())) {
  4563       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
  4564       cumworkdone += workdone;
  4565       loops++;
  4566       // Voluntarily terminate abortable preclean phase if we have
  4567       // been at it for too long.
  4568       if ((CMSMaxAbortablePrecleanLoops != 0) &&
  4569           loops >= CMSMaxAbortablePrecleanLoops) {
  4570         if (PrintGCDetails) {
  4571           gclog_or_tty->print(" CMS: abort preclean due to loops ");
  4573         break;
  4575       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
  4576         if (PrintGCDetails) {
  4577           gclog_or_tty->print(" CMS: abort preclean due to time ");
  4579         break;
  4581       // If we are doing little work each iteration, we should
  4582       // take a short break.
  4583       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
  4584         // Sleep for some time, waiting for work to accumulate
  4585         stopTimer();
  4586         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
  4587         startTimer();
  4588         waited++;
  4591     if (PrintCMSStatistics > 0) {
  4592       gclog_or_tty->print(" ["SIZE_FORMAT" iterations, "SIZE_FORMAT" waits, "SIZE_FORMAT" cards] ",
  4593                           loops, waited, cumworkdone);
  4596   CMSTokenSync x(true); // is cms thread
  4597   if (_collectorState != Idling) {
  4598     assert(_collectorState == AbortablePreclean,
  4599            "Spontaneous state transition?");
  4600     _collectorState = FinalMarking;
  4601   } // Else, a foreground collection completed this CMS cycle.
  4602   return;
  4605 // Respond to an Eden sampling opportunity
  4606 void CMSCollector::sample_eden() {
  4607   // Make sure a young gc cannot sneak in between our
  4608   // reading and recording of a sample.
  4609   assert(Thread::current()->is_ConcurrentGC_thread(),
  4610          "Only the cms thread may collect Eden samples");
  4611   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  4612          "Should collect samples while holding CMS token");
  4613   if (!_start_sampling) {
  4614     return;
  4616   // When CMSEdenChunksRecordAlways is true, the eden chunk array
  4617   // is populated by the young generation.
  4618   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
  4619     if (_eden_chunk_index < _eden_chunk_capacity) {
  4620       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
  4621       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
  4622              "Unexpected state of Eden");
  4623       // We'd like to check that what we just sampled is an oop-start address;
  4624       // however, we cannot do that here since the object may not yet have been
  4625       // initialized. So we'll instead do the check when we _use_ this sample
  4626       // later.
  4627       if (_eden_chunk_index == 0 ||
  4628           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
  4629                          _eden_chunk_array[_eden_chunk_index-1])
  4630            >= CMSSamplingGrain)) {
  4631         _eden_chunk_index++;  // commit sample
  4635   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
  4636     size_t used = get_eden_used();
  4637     size_t capacity = get_eden_capacity();
  4638     assert(used <= capacity, "Unexpected state of Eden");
  4639     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
  4640       _abort_preclean = true;
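      // Worked example of the abort test above: with the assumed default
      // CMSScheduleRemarkEdenPenetration = 50 and an Eden capacity of 100M,
      // capacity/100 * 50 = 50M, so the abortable preclean phase is cut
      // short once Eden occupancy exceeds 50M, and the remark is scheduled.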
  4646 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  4647   assert(_collectorState == Precleaning ||
  4648          _collectorState == AbortablePreclean, "incorrect state");
  4649   ResourceMark rm;
  4650   HandleMark   hm;
  4652   // Precleaning is currently not MT but the reference processor
  4653   // may be set for MT.  Disable it temporarily here.
  4654   ReferenceProcessor* rp = ref_processor();
  4655   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
  4657   // Do one pass of scrubbing the discovered reference lists
  4658   // to remove any reference objects with strongly-reachable
  4659   // referents.
  4660   if (clean_refs) {
  4661     CMSPrecleanRefsYieldClosure yield_cl(this);
  4662     assert(rp->span().equals(_span), "Spans should be equal");
  4663     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
  4664                                    &_markStack, true /* preclean */);
  4665     CMSDrainMarkingStackClosure complete_trace(this,
  4666                                    _span, &_markBitMap, &_markStack,
  4667                                    &keep_alive, true /* preclean */);
  4669     // We don't want this step to interfere with a young
  4670     // collection because we don't want to take CPU
  4671     // or memory bandwidth away from the young GC threads
  4672     // (which may be as many as there are CPUs).
  4673     // Note that we don't need to protect ourselves from
  4674     // interference with mutators because they can't
  4675     // manipulate the discovered reference lists nor affect
  4676     // the computed reachability of the referents, the
  4677     // only properties manipulated by the precleaning
  4678     // of these reference lists.
  4679     stopTimer();
  4680     CMSTokenSyncWithLocks x(true /* is cms thread */,
  4681                             bitMapLock());
  4682     startTimer();
  4683     sample_eden();
  4685     // The following will yield to allow foreground
  4686     // collection to proceed promptly. XXX YSR:
  4687     // The code in this method may need further
  4688     // tweaking for better performance and some restructuring
  4689     // for cleaner interfaces.
  4690     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
  4691     rp->preclean_discovered_references(
  4692           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
  4693           gc_timer);
  4696   if (clean_survivor) {  // preclean the active survivor space(s)
  4697     assert(_young_gen->kind() == Generation::DefNew ||
  4698            _young_gen->kind() == Generation::ParNew ||
  4699            _young_gen->kind() == Generation::ASParNew,
  4700          "incorrect type for cast");
  4701     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
  4702     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
  4703                              &_markBitMap, &_modUnionTable,
  4704                              &_markStack, true /* precleaning phase */);
  4705     stopTimer();
  4706     CMSTokenSyncWithLocks ts(true /* is cms thread */,
  4707                              bitMapLock());
  4708     startTimer();
  4709     unsigned int before_count =
  4710       GenCollectedHeap::heap()->total_collections();
  4711     SurvivorSpacePrecleanClosure
  4712       sss_cl(this, _span, &_markBitMap, &_markStack,
  4713              &pam_cl, before_count, CMSYield);
  4714     dng->from()->object_iterate_careful(&sss_cl);
  4715     dng->to()->object_iterate_careful(&sss_cl);
  4717   MarkRefsIntoAndScanClosure
  4718     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
  4719              &_markStack, this, CMSYield,
  4720              true /* precleaning phase */);
  4721   // CAUTION: The following closure has persistent state that may need to
  4722   // be reset upon a decrease in the sequence of addresses it
  4723   // processes.
  4724   ScanMarkedObjectsAgainCarefullyClosure
  4725     smoac_cl(this, _span,
  4726       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
  4728   // Preclean dirty cards in ModUnionTable and CardTable using
  4729   // appropriate convergence criterion;
  4730   // repeat CMSPrecleanIter times unless we find that
  4731   // we are losing.
  4732   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
  4733   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
  4734          "Bad convergence multiplier");
  4735   assert(CMSPrecleanThreshold >= 100,
  4736          "Unreasonably low CMSPrecleanThreshold");
  4738   size_t numIter, cumNumCards, lastNumCards, curNumCards;
  4739   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
  4740        numIter < CMSPrecleanIter;
  4741        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
  4742     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
  4743     if (Verbose && PrintGCDetails) {
  4744       gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards);
  4746     // Either there are very few dirty cards, so re-mark
  4747     // pause will be small anyway, or our pre-cleaning isn't
  4748     // that much faster than the rate at which cards are being
  4749     // dirtied, so we might as well stop and re-mark since
  4750     // precleaning won't improve our re-mark time by much.
  4751     if (curNumCards <= CMSPrecleanThreshold ||
  4752         (numIter > 0 &&
  4753          (curNumCards * CMSPrecleanDenominator >
  4754          lastNumCards * CMSPrecleanNumerator))) {
  4755       numIter++;
  4756       cumNumCards += curNumCards;
  4757       break;
  4761   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
  4763   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  4764   cumNumCards += curNumCards;
  4765   if (PrintGCDetails && PrintCMSStatistics != 0) {
  4766     gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)",
  4767                   curNumCards, cumNumCards, numIter);
  4769   return cumNumCards;   // as a measure of useful work done
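// Worked example of the convergence test above, assuming the common
// defaults CMSPrecleanNumerator = 2 and CMSPrecleanDenominator = 3: the
// loop stops early once curNumCards * 3 > lastNumCards * 2, i.e. once an
// iteration fails to shrink the dirty-card count to under two thirds of
// the previous iteration's count (or once it drops below
// CMSPrecleanThreshold outright).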
  4772 // PRECLEANING NOTES:
  4773 // Precleaning involves:
  4774 // . reading the bits of the modUnionTable and clearing the set bits.
  4775 // . For the cards corresponding to the set bits, we scan the
  4776 //   objects on those cards. This means we need the free_list_lock
  4777 //   so that we can safely iterate over the CMS space when scanning
  4778 //   for oops.
  4779 // . When we scan the objects, we'll be both reading and setting
  4780 //   marks in the marking bit map, so we'll need the marking bit map.
  4781 // . For protecting _collector_state transitions, we take the CGC_lock.
  4782 //   Note that any races in the reading of card table entries by the
  4783 //   CMS thread on the one hand and the clearing of those entries by the
  4784 //   VM thread or the setting of those entries by the mutator threads on the
  4785 //   other are quite benign. However, for efficiency it makes sense to keep
  4786 //   the VM thread from racing with the CMS thread while the latter is
  4787 //   recording dirty card info to the modUnionTable. We therefore also use the
  4788 //   CGC_lock to protect the reading of the card table and the mod union
  4789 //   table by the CMS thread.
  4790 // . We run concurrently with mutator updates, so scanning
  4791 //   needs to be done carefully  -- we should not try to scan
  4792 //   potentially uninitialized objects.
  4793 //
  4794 // Locking strategy: While holding the CGC_lock, we scan over and
  4795 // reset a maximal dirty range of the mod union / card tables, then lock
  4796 // the free_list_lock and bitmap lock to do a full marking, then
  4797 // release these locks; and repeat the cycle. This allows for a
  4798 // certain amount of fairness in the sharing of these locks between
  4799 // the CMS collector on the one hand, and the VM thread and the
  4800 // mutators on the other.
  4802 // NOTE: preclean_mod_union_table() and preclean_card_table()
  4803 // further below are largely identical; if you need to modify
  4804 // one of these methods, please check the other method too.
  4806 size_t CMSCollector::preclean_mod_union_table(
  4807   ConcurrentMarkSweepGeneration* gen,
  4808   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  4809   verify_work_stacks_empty();
  4810   verify_overflow_empty();
  4812   // strategy: starting with the first card, accumulate contiguous
  4813   // ranges of dirty cards; clear these cards, then scan the region
  4814   // covered by these cards.
  4816   // Since all of the MUT is committed ahead, we can just use
  4817   // that, in case the generations expand while we are precleaning.
  4818   // It might also be fine to just use the committed part of the
  4819   // generation, but we might potentially miss cards when the
  4820   // generation is rapidly expanding while we are in the midst
  4821   // of precleaning.
  4822   HeapWord* startAddr = gen->reserved().start();
  4823   HeapWord* endAddr   = gen->reserved().end();
  4825   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
  4827   size_t numDirtyCards, cumNumDirtyCards;
  4828   HeapWord *nextAddr, *lastAddr;
  4829   for (cumNumDirtyCards = numDirtyCards = 0,
  4830        nextAddr = lastAddr = startAddr;
  4831        nextAddr < endAddr;
  4832        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
  4834     ResourceMark rm;
  4835     HandleMark   hm;
  4837     MemRegion dirtyRegion;
  4839       stopTimer();
  4840       // Potential yield point
  4841       CMSTokenSync ts(true);
  4842       startTimer();
  4843       sample_eden();
  4844       // Get dirty region starting at nextOffset (inclusive),
  4845       // simultaneously clearing it.
  4846       dirtyRegion =
  4847         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
  4848       assert(dirtyRegion.start() >= nextAddr,
  4849              "returned region inconsistent?");
  4851     // Remember where the next search should begin.
  4852     // The returned region (if non-empty) is a right open interval,
  4853     // so lastOffset is obtained from the right end of that
  4854     // interval.
  4855     lastAddr = dirtyRegion.end();
  4856     // Should do something more transparent and less hacky XXX
  4857     numDirtyCards =
  4858       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
  4860     // We'll scan the cards in the dirty region (with periodic
  4861     // yields for foreground GC as needed).
  4862     if (!dirtyRegion.is_empty()) {
  4863       assert(numDirtyCards > 0, "consistency check");
  4864       HeapWord* stop_point = NULL;
  4865       stopTimer();
  4866       // Potential yield point
  4867       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
  4868                                bitMapLock());
  4869       startTimer();
  4871         verify_work_stacks_empty();
  4872         verify_overflow_empty();
  4873         sample_eden();
  4874         stop_point =
  4875           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
  4877       if (stop_point != NULL) {
  4878         // The careful iteration stopped early either because it found an
  4879         // uninitialized object, or because we were in the midst of an
  4880         // "abortable preclean", which should now be aborted. Redirty
  4881         // the bits corresponding to the partially-scanned or unscanned
  4882         // cards. We'll either restart at the next block boundary or
  4883         // abort the preclean.
  4884         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
  4885                "Should only be AbortablePreclean.");
  4886         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
  4887         if (should_abort_preclean()) {
  4888           break; // out of preclean loop
  4889         } else {
  4890           // Compute the next address at which preclean should pick up;
  4891           // might need bitMapLock in order to read P-bits.
  4892           lastAddr = next_card_start_after_block(stop_point);
  4895     } else {
  4896       assert(lastAddr == endAddr, "consistency check");
  4897       assert(numDirtyCards == 0, "consistency check");
  4898       break;
  4901   verify_work_stacks_empty();
  4902   verify_overflow_empty();
  4903   return cumNumDirtyCards;
  4906 // NOTE: preclean_mod_union_table() above and preclean_card_table()
  4907 // below are largely identical; if you need to modify
  4908 // one of these methods, please check the other method too.
  4910 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
  4911   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  4912   // strategy: it's similar to preclean_mod_union_table above, in that
  4913   // we accumulate contiguous ranges of dirty cards, mark these cards
  4914   // precleaned, then scan the region covered by these cards.
  4915   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
  4916   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
  4918   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
  4920   size_t numDirtyCards, cumNumDirtyCards;
  4921   HeapWord *lastAddr, *nextAddr;
  4923   for (cumNumDirtyCards = numDirtyCards = 0,
  4924        nextAddr = lastAddr = startAddr;
  4925        nextAddr < endAddr;
  4926        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
  4928     ResourceMark rm;
  4929     HandleMark   hm;
  4931     MemRegion dirtyRegion;
  4933       // See comments in "Precleaning notes" above on why we
  4934       // do this locking. XXX Could the locking overheads be
  4935       // too high when dirty cards are sparse? [I don't think so.]
  4936       stopTimer();
  4937       CMSTokenSync x(true); // is cms thread
  4938       startTimer();
  4939       sample_eden();
  4940       // Get and clear dirty region from card table
  4941       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
  4942                                     MemRegion(nextAddr, endAddr),
  4943                                     true,
  4944                                     CardTableModRefBS::precleaned_card_val());
  4946       assert(dirtyRegion.start() >= nextAddr,
  4947              "returned region inconsistent?");
  4949     lastAddr = dirtyRegion.end();
  4950     numDirtyCards =
  4951       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
  4953     if (!dirtyRegion.is_empty()) {
  4954       stopTimer();
  4955       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
  4956       startTimer();
  4957       sample_eden();
  4958       verify_work_stacks_empty();
  4959       verify_overflow_empty();
  4960       HeapWord* stop_point =
  4961         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
  4962       if (stop_point != NULL) {
  4963         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
  4964                "Should only be AbortablePreclean.");
  4965         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
  4966         if (should_abort_preclean()) {
  4967           break; // out of preclean loop
  4968         } else {
  4969           // Compute the next address at which preclean should pick up.
  4970           lastAddr = next_card_start_after_block(stop_point);
  4973     } else {
  4974       break;
  4977   verify_work_stacks_empty();
  4978   verify_overflow_empty();
  4979   return cumNumDirtyCards;
  4982 class PrecleanKlassClosure : public KlassClosure {
  4983   CMKlassClosure _cm_klass_closure;
  4984  public:
  4985   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
  4986   void do_klass(Klass* k) {
  4987     if (k->has_accumulated_modified_oops()) {
  4988       k->clear_accumulated_modified_oops();
  4990       _cm_klass_closure.do_klass(k);
  4993 };
  4995 // The freelist lock is needed to prevent asserts; is it really needed?
  4996 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
  4998   cl->set_freelistLock(freelistLock);
  5000   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
  5002   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
  5003   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
  5004   PrecleanKlassClosure preclean_klass_closure(cl);
  5005   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
  5007   verify_work_stacks_empty();
  5008   verify_overflow_empty();
  5011 void CMSCollector::checkpointRootsFinal(bool asynch,
  5012   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  5013   assert(_collectorState == FinalMarking, "incorrect state transition?");
  5014   check_correct_thread_executing();
  5015   // world is stopped at this checkpoint
  5016   assert(SafepointSynchronize::is_at_safepoint(),
  5017          "world should be stopped");
  5018   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  5020   verify_work_stacks_empty();
  5021   verify_overflow_empty();
  5023   SpecializationStats::clear();
  5024   if (PrintGCDetails) {
  5025     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
  5026                         _young_gen->used() / K,
  5027                         _young_gen->capacity() / K);
  5029   if (asynch) {
  5030     if (CMSScavengeBeforeRemark) {
  5031       GenCollectedHeap* gch = GenCollectedHeap::heap();
  5032       // Temporarily set flag to false, GCH->do_collection will
  5033       // expect it to be false and set to true
  5034       FlagSetting fl(gch->_is_gc_active, false);
  5035       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
  5036         PrintGCDetails && Verbose, true, _gc_timer_cm);)
  5037       int level = _cmsGen->level() - 1;
  5038       if (level >= 0) {
  5039         gch->do_collection(true,        // full (i.e. force, see below)
  5040                            false,       // !clear_all_soft_refs
  5041                            0,           // size
  5042                            false,       // is_tlab
  5043                            level        // max_level
  5044                           );
  5047     FreelistLocker x(this);
  5048     MutexLockerEx y(bitMapLock(),
  5049                     Mutex::_no_safepoint_check_flag);
  5050     assert(!init_mark_was_synchronous, "but that's impossible!");
  5051     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
  5052   } else {
  5053     // already have all the locks
  5054     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
  5055                              init_mark_was_synchronous);
  5057   verify_work_stacks_empty();
  5058   verify_overflow_empty();
  5059   SpecializationStats::print();
  5062 void CMSCollector::checkpointRootsFinalWork(bool asynch,
  5063   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  5065   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
  5067   assert(haveFreelistLocks(), "must have free list locks");
  5068   assert_lock_strong(bitMapLock());
  5070   if (UseAdaptiveSizePolicy) {
  5071     size_policy()->checkpoint_roots_final_begin();
  5074   ResourceMark rm;
  5075   HandleMark   hm;
  5077   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5079   if (should_unload_classes()) {
  5080     CodeCache::gc_prologue();
  5082   assert(haveFreelistLocks(), "must have free list locks");
  5083   assert_lock_strong(bitMapLock());
  5085   if (!init_mark_was_synchronous) {
  5086     // We might assume that we need not fill TLAB's when
  5087     // CMSScavengeBeforeRemark is set, because we may have just done
  5088     // a scavenge which would have filled all TLAB's -- and besides
  5089     // Eden would be empty. This however may not always be the case --
  5090     // for instance although we asked for a scavenge, it may not have
  5091     // happened because of a JNI critical section. We probably need
  5092     // a policy for deciding whether we can in that case wait until
  5093     // the critical section releases and then do the remark following
  5094     // the scavenge, and skip it here. In the absence of that policy,
  5095     // or of an indication of whether the scavenge did indeed occur,
  5096     // we cannot rely on TLAB's having been filled and must do
  5097     // so here just in case a scavenge did not happen.
  5098     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
  5099     // Update the saved marks which may affect the root scans.
  5100     gch->save_marks();
  5102     if (CMSPrintEdenSurvivorChunks) {
  5103       print_eden_and_survivor_chunk_arrays();
  5104     }
  5106     {
  5107       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  5109       // Note on the role of the mod union table:
  5110       // Since the marker in "markFromRoots" marks concurrently with
  5111       // mutators, it is possible for some reachable objects not to have been
  5112       // scanned. For instance, the only reference to an object A was
  5113       // placed in object B after the marker scanned B. Unless B is rescanned,
  5114       // A would be collected. Such updates to references in marked objects
  5115       // are detected via the mod union table which is the set of all cards
  5116       // dirtied since the first checkpoint in this GC cycle and prior to
  5117       // the most recent young generation GC, minus those cleaned up by the
  5118       // concurrent precleaning. (An illustrative sketch of this scheme follows this method, below.)
  5119       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
  5120         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
  5121         do_remark_parallel();
  5122       } else {
  5123         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
  5124                     _gc_timer_cm);
  5125         do_remark_non_parallel();
  5126       }
  5127     }
  5128   } else {
  5129     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
  5130     // The initial mark was stop-world, so there's no rescanning to
  5131     // do; go straight on to the next step below.
  5132   }
  5133   verify_work_stacks_empty();
  5134   verify_overflow_empty();
  5136   {
  5137     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
  5138     refProcessingWork(asynch, clear_all_soft_refs);
  5139   }
  5140   verify_work_stacks_empty();
  5141   verify_overflow_empty();
  5143   if (should_unload_classes()) {
  5144     CodeCache::gc_epilogue();
  5145   }
  5146   JvmtiExport::gc_epilogue();
  5148   // If we encountered any (marking stack / work queue) overflow
  5149   // events during the current CMS cycle, take appropriate
  5150   // remedial measures, where possible, so as to try and avoid
  5151   // recurrence of that condition.
  5152   assert(_markStack.isEmpty(), "No grey objects");
  5153   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
  5154                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
  5155   if (ser_ovflw > 0) {
  5156     if (PrintCMSStatistics != 0) {
  5157       gclog_or_tty->print_cr("Marking stack overflow (benign) "
  5158         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
  5159         ", kac_preclean="SIZE_FORMAT")",
  5160         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
  5161         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
  5162     }
  5163     _markStack.expand();
  5164     _ser_pmc_remark_ovflw = 0;
  5165     _ser_pmc_preclean_ovflw = 0;
  5166     _ser_kac_preclean_ovflw = 0;
  5167     _ser_kac_ovflw = 0;
  5168   }
  5169   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
  5170     if (PrintCMSStatistics != 0) {
  5171       gclog_or_tty->print_cr("Work queue overflow (benign) "
  5172         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
  5173         _par_pmc_remark_ovflw, _par_kac_ovflw);
  5174     }
  5175     _par_pmc_remark_ovflw = 0;
  5176     _par_kac_ovflw = 0;
  5177   }
  5178   if (PrintCMSStatistics != 0) {
  5179      if (_markStack._hit_limit > 0) {
  5180        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
  5181                               _markStack._hit_limit);
  5182      }
  5183      if (_markStack._failed_double > 0) {
  5184        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
  5185                               " current capacity "SIZE_FORMAT,
  5186                               _markStack._failed_double,
  5187                               _markStack.capacity());
  5188      }
  5189   }
  5190   _markStack._hit_limit = 0;
  5191   _markStack._failed_double = 0;
  5193   if ((VerifyAfterGC || VerifyDuringGC) &&
  5194       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  5195     verify_after_remark();
  5196   }
  5198   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
  5200   // Change under the freelistLocks.
  5201   _collectorState = Sweeping;
  5202   // Call isAllClear() under bitMapLock
  5203   assert(_modUnionTable.isAllClear(),
  5204       "Should be clear by end of the final marking");
  5205   assert(_ct->klass_rem_set()->mod_union_is_clear(),
  5206       "Should be clear by end of the final marking");
  5207   if (UseAdaptiveSizePolicy) {
  5208     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
  5209   }
  5210 }
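////////////////////////////////////////////////////////
// Aside: a minimal, self-contained sketch of the card-table / mod-union-table
// interplay described in checkpointRootsFinalWork() above. Everything below is
// a hypothetical stand-in for illustration only, not HotSpot's actual types:
// the real tables are bitmaps with one bit per 512-byte card over the old gen,
// maintained by the write barrier.
#include <cstddef>
#include <vector>

struct SketchModUnionScheme {
  std::vector<bool> card_table;       // dirtied by mutator writes, recycled by young GCs
  std::vector<bool> mod_union_table;  // union of dirty cards over the whole CMS cycle

  explicit SketchModUnionScheme(size_t n_cards)
    : card_table(n_cards, false), mod_union_table(n_cards, false) {}

  // Write barrier: dirty the card covering a mutated reference field.
  void dirty(size_t card_index) { card_table[card_index] = true; }

  // Before the card table is reused (e.g. at a young GC), OR its dirty bits
  // into the mod union table so the mutation record survives.
  void transfer_to_mod_union() {
    for (size_t i = 0; i < card_table.size(); i++) {
      if (card_table[i]) { mod_union_table[i] = true; card_table[i] = false; }
    }
  }

  // At remark: every card mutated at any point since the initial checkpoint
  // (and not cleaned up by precleaning) must be rescanned, then cleared.
  template <typename RescanFn>
  void remark(RescanFn rescan) {
    transfer_to_mod_union();
    for (size_t i = 0; i < mod_union_table.size(); i++) {
      if (mod_union_table[i]) { rescan(i); mod_union_table[i] = false; }
    }
  }
};
////////////////////////////////////////////////////////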
  5212 void CMSParInitialMarkTask::work(uint worker_id) {
  5213   elapsedTimer _timer;
  5214   ResourceMark rm;
  5215   HandleMark   hm;
  5217   // ---------- scan from roots --------------
  5218   _timer.start();
  5219   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5220   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
  5221   CMKlassClosure klass_closure(&par_mri_cl);
  5223   // ---------- young gen roots --------------
  5224   {
  5225     work_on_young_gen_roots(worker_id, &par_mri_cl);
  5226     _timer.stop();
  5227     if (PrintCMSStatistics != 0) {
  5228       gclog_or_tty->print_cr(
  5229         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
  5230         worker_id, _timer.seconds());
  5231     }
  5232   }
  5234   // ---------- remaining roots --------------
  5235   _timer.reset();
  5236   _timer.start();
  5237   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
  5238                                 false,     // yg was scanned above
  5239                                 false,     // this is parallel code
  5240                                 false,     // not scavenging
  5241                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  5242                                 &par_mri_cl,
  5243                                 true,   // walk all of code cache if (so & SO_CodeCache)
  5244                                 NULL,
  5245                                 &klass_closure);
  5246   assert(_collector->should_unload_classes()
  5247          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
  5248          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  5249   _timer.stop();
  5250   if (PrintCMSStatistics != 0) {
  5251     gclog_or_tty->print_cr(
  5252       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
  5253       worker_id, _timer.seconds());
  5254   }
  5255 }
  5257 // Parallel remark task
  5258 class CMSParRemarkTask: public CMSParMarkTask {
  5259   CompactibleFreeListSpace* _cms_space;
  5261   // The per-thread work queues, available here for stealing.
  5262   OopTaskQueueSet*       _task_queues;
  5263   ParallelTaskTerminator _term;
  5265  public:
  5266   // A value of 0 passed to n_workers will cause the number of
  5267   // workers to be taken from the active workers in the work gang.
  5268   CMSParRemarkTask(CMSCollector* collector,
  5269                    CompactibleFreeListSpace* cms_space,
  5270                    int n_workers, FlexibleWorkGang* workers,
  5271                    OopTaskQueueSet* task_queues):
  5272     CMSParMarkTask("Rescan roots and grey objects in parallel",
  5273                    collector, n_workers),
  5274     _cms_space(cms_space),
  5275     _task_queues(task_queues),
  5276     _term(n_workers, task_queues) { }
  5278   OopTaskQueueSet* task_queues() { return _task_queues; }
  5280   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
  5282   ParallelTaskTerminator* terminator() { return &_term; }
  5283   int n_workers() { return _n_workers; }
  5285   void work(uint worker_id);
  5287  private:
  5288   // ... of dirty cards in old space
  5289   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
  5290                                   Par_MarkRefsIntoAndScanClosure* cl);
  5292   // ... work stealing for the above
  5293   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
  5294 };
  5296 class RemarkKlassClosure : public KlassClosure {
  5297   CMKlassClosure _cm_klass_closure;
  5298  public:
  5299   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
  5300   void do_klass(Klass* k) {
  5301     // Check if we have modified any oops in the Klass during the concurrent marking.
  5302     if (k->has_accumulated_modified_oops()) {
  5303       k->clear_accumulated_modified_oops();
  5305       // We could have transferred the current modified marks to the accumulated marks,
  5306       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
  5307     } else if (k->has_modified_oops()) {
  5308       // Don't clear anything, this info is needed by the next young collection.
  5309     } else {
  5310       // No modified oops in the Klass.
  5311       return;
  5312     }
  5314     // The klass has modified fields, need to scan the klass.
  5315     _cm_klass_closure.do_klass(k);
  5316   }
  5317 };
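////////////////////////////////////////////////////////
// Aside: a minimal sketch of the decision RemarkKlassClosure::do_klass() makes
// above. The two flags play the same roles as the card table (young-GC
// interest) and the mod union table (CMS-cycle interest); SketchKlass is a
// hypothetical stand-in, not the real Klass.
struct SketchKlass {
  bool has_modified_oops;              // still needed by the next young collection
  bool has_accumulated_modified_oops;  // mutated during concurrent marking
};

inline bool sketch_klass_needs_remark_scan(SketchKlass* k) {
  if (k->has_accumulated_modified_oops) {
    k->has_accumulated_modified_oops = false;  // consumed: remark scans it now
    return true;
  }
  // Deliberately leave has_modified_oops set (the next young collection still
  // needs it), but it too implies the klass may reference new objects.
  return k->has_modified_oops;
}
////////////////////////////////////////////////////////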
  5319 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
  5320   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
  5321   EdenSpace* eden_space = dng->eden();
  5322   ContiguousSpace* from_space = dng->from();
  5323   ContiguousSpace* to_space   = dng->to();
  5325   HeapWord** eca = _collector->_eden_chunk_array;
  5326   size_t     ect = _collector->_eden_chunk_index;
  5327   HeapWord** sca = _collector->_survivor_chunk_array;
  5328   size_t     sct = _collector->_survivor_chunk_index;
  5330   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
  5331   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
  5333   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
  5334   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
  5335   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
  5336 }
  5338 // work_queue(i) is passed to the closure
  5339 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
  5340 // also is passed to do_dirty_card_rescan_tasks() and to
  5341 // do_work_steal() to select the i-th task_queue.
  5343 void CMSParRemarkTask::work(uint worker_id) {
  5344   elapsedTimer _timer;
  5345   ResourceMark rm;
  5346   HandleMark   hm;
  5348   // ---------- rescan from roots --------------
  5349   _timer.start();
  5350   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5351   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
  5352     _collector->_span, _collector->ref_processor(),
  5353     &(_collector->_markBitMap),
  5354     work_queue(worker_id));
  5356   // Rescan young gen roots first since these are likely
  5357   // coarsely partitioned and may, on that account, constitute
  5358   // the critical path; thus, it's best to start off that
  5359   // work first.
  5360   // ---------- young gen roots --------------
  5361   {
  5362     work_on_young_gen_roots(worker_id, &par_mrias_cl);
  5363     _timer.stop();
  5364     if (PrintCMSStatistics != 0) {
  5365       gclog_or_tty->print_cr(
  5366         "Finished young gen rescan work in %dth thread: %3.3f sec",
  5367         worker_id, _timer.seconds());
  5368     }
  5369   }
  5371   // ---------- remaining roots --------------
  5372   _timer.reset();
  5373   _timer.start();
  5374   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
  5375                                 false,     // yg was scanned above
  5376                                 false,     // this is parallel code
  5377                                 false,     // not scavenging
  5378                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  5379                                 &par_mrias_cl,
  5380                                 true,   // walk all of code cache if (so & SO_CodeCache)
  5381                                 NULL,
  5382                                 NULL);     // The dirty klasses will be handled below
  5383   assert(_collector->should_unload_classes()
  5384          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
  5385          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  5386   _timer.stop();
  5387   if (PrintCMSStatistics != 0) {
  5388     gclog_or_tty->print_cr(
  5389       "Finished remaining root rescan work in %dth thread: %3.3f sec",
  5390       worker_id, _timer.seconds());
  5391   }
  5393   // ---------- unhandled CLD scanning ----------
  5394   if (worker_id == 0) { // Single threaded at the moment.
  5395     _timer.reset();
  5396     _timer.start();
  5398     // Scan all new class loader data objects and new dependencies that were
  5399     // introduced during concurrent marking.
  5400     ResourceMark rm;
  5401     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
  5402     for (int i = 0; i < array->length(); i++) {
  5403       par_mrias_cl.do_class_loader_data(array->at(i));
  5404     }
  5406     // We don't need to keep track of new CLDs anymore.
  5407     ClassLoaderDataGraph::remember_new_clds(false);
  5409     _timer.stop();
  5410     if (PrintCMSStatistics != 0) {
  5411       gclog_or_tty->print_cr(
  5412           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
  5413           worker_id, _timer.seconds());
  5414     }
  5415   }
  5417   // ---------- dirty klass scanning ----------
  5418   if (worker_id == 0) { // Single threaded at the moment.
  5419     _timer.reset();
  5420     _timer.start();
  5422     // Scan all classes that were dirtied during the concurrent marking phase.
  5423     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
  5424     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
  5426     _timer.stop();
  5427     if (PrintCMSStatistics != 0) {
  5428       gclog_or_tty->print_cr(
  5429           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
  5430           worker_id, _timer.seconds());
  5431     }
  5432   }
  5434   // We might have added oops to ClassLoaderData::_handles during the
  5435   // concurrent marking phase. These oops point to newly allocated objects
  5436   // that are guaranteed to be kept alive, either by the direct allocation
  5437   // code or when the young collector processes the strong roots. Hence,
  5438   // we don't have to revisit the _handles block during the remark phase.
  5440   // ---------- rescan dirty cards ------------
  5441   _timer.reset();
  5442   _timer.start();
  5444   // Do the rescan tasks for the (one and only) CMS space,
  5445   // cms_space.
  5446   // "worker_id" is passed to select the task_queue for "worker_id"
  5447   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
  5448   _timer.stop();
  5449   if (PrintCMSStatistics != 0) {
  5450     gclog_or_tty->print_cr(
  5451       "Finished dirty card rescan work in %dth thread: %3.3f sec",
  5452       worker_id, _timer.seconds());
  5453   }
  5455   // ---------- steal work from other threads ...
  5456   // ---------- ... and drain overflow list.
  5457   _timer.reset();
  5458   _timer.start();
  5459   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
  5460   _timer.stop();
  5461   if (PrintCMSStatistics != 0) {
  5462     gclog_or_tty->print_cr(
  5463       "Finished work stealing in %dth thread: %3.3f sec",
  5464       worker_id, _timer.seconds());
  5465   }
  5466 }
  5468 // Note that the "worker_id" parameter is not used.
  5469 void
  5470 CMSParMarkTask::do_young_space_rescan(uint worker_id,
  5471   OopsInGenClosure* cl, ContiguousSpace* space,
  5472   HeapWord** chunk_array, size_t chunk_top) {
  5473   // Until all tasks completed:
  5474   // . claim an unclaimed task
  5475   // . compute region boundaries corresponding to task claimed
  5476   //   using chunk_array
  5477   // . par_oop_iterate(cl) over that region
  5479   ResourceMark rm;
  5480   HandleMark   hm;
  5482   SequentialSubTasksDone* pst = space->par_seq_tasks();
  5484   uint nth_task = 0;
  5485   uint n_tasks  = pst->n_tasks();
  5487   if (n_tasks > 0) {
  5488     assert(pst->valid(), "Uninitialized use?");
  5489     HeapWord *start, *end;
  5490     while (!pst->is_task_claimed(/* reference */ nth_task)) {
  5491       // We claimed task # nth_task; compute its boundaries.
  5492       if (chunk_top == 0) {  // no samples were taken
  5493         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
  5494         start = space->bottom();
  5495         end   = space->top();
  5496       } else if (nth_task == 0) {
  5497         start = space->bottom();
  5498         end   = chunk_array[nth_task];
  5499       } else if (nth_task < (uint)chunk_top) {
  5500         assert(nth_task >= 1, "Control point invariant");
  5501         start = chunk_array[nth_task - 1];
  5502         end   = chunk_array[nth_task];
  5503       } else {
  5504         assert(nth_task == (uint)chunk_top, "Control point invariant");
  5505         start = chunk_array[chunk_top - 1];
  5506         end   = space->top();
  5507       }
  5508       MemRegion mr(start, end);
  5509       // Verify that mr is in space
  5510       assert(mr.is_empty() || space->used_region().contains(mr),
  5511              "Should be in space");
  5512       // Verify that "start" is an object boundary
  5513       assert(mr.is_empty() || oop(mr.start())->is_oop(),
  5514              "Should be an oop");
  5515       space->par_oop_iterate(mr, cl);
  5516     }
  5517     pst->all_tasks_completed();
  5518   }
  5519 }
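////////////////////////////////////////////////////////
// Aside: a standalone sketch of the task-to-region mapping used above.
// chunk_array holds chunk_top sampled object boundaries in increasing address
// order, and task n covers [boundary n-1, boundary n), with the first and
// last tasks pinned to the space's bottom and top. Plain char* pointers stand
// in for HeapWord* here; the function shape is illustrative, not HotSpot's.
#include <cassert>
#include <cstddef>
#include <utility>

typedef std::pair<char*, char*> SketchRegion;  // [start, end)

inline SketchRegion sketch_task_region(size_t nth_task, char** chunk_array,
                                       size_t chunk_top,  // samples recorded
                                       char* bottom, char* top) {
  if (chunk_top == 0) {            // no samples: a single task, whole space
    assert(nth_task == 0 && "only one task when there are no samples");
    return SketchRegion(bottom, top);
  } else if (nth_task == 0) {
    return SketchRegion(bottom, chunk_array[0]);
  } else if (nth_task < chunk_top) {
    return SketchRegion(chunk_array[nth_task - 1], chunk_array[nth_task]);
  } else {
    assert(nth_task == chunk_top && "one past the last sample");
    return SketchRegion(chunk_array[chunk_top - 1], top);
  }
}
////////////////////////////////////////////////////////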
  5521 void
  5522 CMSParRemarkTask::do_dirty_card_rescan_tasks(
  5523   CompactibleFreeListSpace* sp, int i,
  5524   Par_MarkRefsIntoAndScanClosure* cl) {
  5525   // Until all tasks completed:
  5526   // . claim an unclaimed task
  5527   // . compute region boundaries corresponding to task claimed
  5528   // . transfer dirty bits ct->mut for that region
  5529   // . apply rescanclosure to dirty mut bits for that region
  5531   ResourceMark rm;
  5532   HandleMark   hm;
  5534   OopTaskQueue* work_q = work_queue(i);
  5535   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
  5536   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
  5537   // CAUTION: This closure has state that persists across calls to
  5538   // the work method dirty_range_iterate_clear() in that it has
  5539   // embedded in it a (subtype of) UpwardsObjectClosure. The
  5540   // use of that state in the embedded UpwardsObjectClosure instance
  5541   // assumes that the cards are always iterated (even if in parallel
  5542   // by several threads) in monotonically increasing order per each
  5543   // thread. This is true of the implementation below which picks
  5544   // card ranges (chunks) in monotonically increasing order globally
  5545   // and, a-fortiori, in monotonically increasing order per thread
  5546   // (the latter order being a subsequence of the former).
  5547   // If the work code below is ever reorganized into a more chaotic
  5548   // work-partitioning form than the current "sequential tasks"
  5549   // paradigm, the use of that persistent state will have to be
  5550   // revisited and modified appropriately. See also related
  5551   // bug 4756801 work on which should examine this code to make
  5552   // sure that the changes there do not run counter to the
  5553   // assumptions made here and necessary for correctness and
  5554   // efficiency. Note also that this code might yield inefficient
  5555   // behaviour in the case of very large objects that span one or
  5556   // more work chunks. Such objects would potentially be scanned
  5557   // several times redundantly. Work on 4756801 should try and
  5558   // address that performance anomaly if at all possible. XXX
  5559   MemRegion  full_span  = _collector->_span;
  5560   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
  5561   MarkFromDirtyCardsClosure
  5562     greyRescanClosure(_collector, full_span, // entire span of interest
  5563                       sp, bm, work_q, cl);
  5565   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  5566   assert(pst->valid(), "Uninitialized use?");
  5567   uint nth_task = 0;
  5568   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
  5569   MemRegion span = sp->used_region();
  5570   HeapWord* start_addr = span.start();
  5571   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
  5572                                            alignment);
  5573   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
  5574   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
  5575          start_addr, "Check alignment");
  5576   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
  5577          chunk_size, "Check alignment");
  5579   while (!pst->is_task_claimed(/* reference */ nth_task)) {
  5580     // Having claimed the nth_task, compute corresponding mem-region,
  5581     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
  5582     // The alignment restriction ensures that we do not need any
  5583     // synchronization with other gang-workers while setting or
  5584     // clearing bits in this chunk of the MUT.
  5585     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
  5586                                     start_addr + (nth_task+1)*chunk_size);
  5587     // The last chunk's end might be way beyond end of the
  5588     // used region. In that case pull back appropriately.
  5589     if (this_span.end() > end_addr) {
  5590       this_span.set_end(end_addr);
  5591       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
  5592     }
  5593     // Iterate over the dirty cards covering this chunk, marking them
  5594     // precleaned, and setting the corresponding bits in the mod union
  5595     // table. Since we have been careful to partition at Card and MUT-word
  5596     // boundaries no synchronization is needed between parallel threads.
  5597     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
  5598                                                  &modUnionClosure);
  5600     // Having transferred these marks into the modUnionTable,
  5601     // rescan the marked objects on the dirty cards in the modUnionTable.
  5602     // Even if this is at a synchronous collection, the initial marking
  5603     // may have been done during an asynchronous collection so there
  5604     // may be dirty bits in the mod-union table.
  5605     _collector->_modUnionTable.dirty_range_iterate_clear(
  5606                   this_span, &greyRescanClosure);
  5607     _collector->_modUnionTable.verifyNoOneBitsInRange(
  5608                                  this_span.start(),
  5609                                  this_span.end());
  5610   }
  5611   pst->all_tasks_completed();  // declare that i am done
  5612 }
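////////////////////////////////////////////////////////
// Aside: a sketch of the aligned chunking used above. Aligning chunk
// boundaries to (bits per MUT word) * (bytes per card) guarantees that two
// workers never set or clear bits in the same mod-union-table word without
// synchronization. The constants and function shape are illustrative, not
// HotSpot's; the alignment must be a power of two for the rounding below.
#include <cstddef>
#include <stdint.h>

const size_t kCardSize    = 512;                      // bytes per card (typical)
const size_t kBitsPerWord = 64;
const size_t kAlignment   = kCardSize * kBitsPerWord; // bytes per MUT word

inline uintptr_t sketch_round_up(uintptr_t p, size_t align) {
  return (p + align - 1) & ~(uintptr_t)(align - 1);
}

// Chunk k of a [start, end) span cut into aligned chunk_size-byte pieces; the
// last chunk is pulled back to the (rounded) end of the used region. Callers
// are assumed to pass only k values whose chunk intersects the span, as the
// n_tasks calculation above guarantees.
inline void sketch_chunk_span(uintptr_t start, uintptr_t end, size_t chunk_size,
                              size_t k, uintptr_t* chunk_start, uintptr_t* chunk_end) {
  // Mirrors the asserts above: start and chunk_size are kAlignment-aligned.
  uintptr_t limit = sketch_round_up(end, kAlignment);
  *chunk_start = start + k * chunk_size;
  *chunk_end   = *chunk_start + chunk_size;
  if (*chunk_end > limit) *chunk_end = limit;  // pull the last chunk back
}
////////////////////////////////////////////////////////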
  5614 // . see if we can share work_queues with ParNew? XXX
  5615 void
  5616 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
  5617                                 int* seed) {
  5618   OopTaskQueue* work_q = work_queue(i);
  5619   NOT_PRODUCT(int num_steals = 0;)
  5620   oop obj_to_scan;
  5621   CMSBitMap* bm = &(_collector->_markBitMap);
  5623   while (true) {
  5624     // Completely finish any left over work from (an) earlier round(s)
  5625     cl->trim_queue(0);
  5626     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
  5627                                          (size_t)ParGCDesiredObjsFromOverflowList);
  5628     // Now check if there's any work in the overflow list
  5629     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
  5630     // only affects the number of attempts made to get work from the
  5631     // overflow list and does not affect the number of workers.  Just
  5632     // pass ParallelGCThreads so this behavior is unchanged.
  5633     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
  5634                                                 work_q,
  5635                                                 ParallelGCThreads)) {
  5636       // found something in global overflow list;
  5637       // not yet ready to go stealing work from others.
  5638       // We'd like to assert(work_q->size() != 0, ...)
  5639       // because we just took work from the overflow list,
  5640       // but of course we can't since all of that could have
  5641       // been already stolen from us.
  5642       // "He giveth and He taketh away."
  5643       continue;
  5644     }
  5645     // Verify that we have no work before we resort to stealing
  5646     assert(work_q->size() == 0, "Have work, shouldn't steal");
  5647     // Try to steal from other queues that have work
  5648     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
  5649       NOT_PRODUCT(num_steals++;)
  5650       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
  5651       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
  5652       // Do scanning work
  5653       obj_to_scan->oop_iterate(cl);
  5654       // Loop around, finish this work, and try to steal some more
  5655     } else if (terminator()->offer_termination()) {
  5656         break;  // nirvana from the infinite cycle
  5657     }
  5658   }
  5659   NOT_PRODUCT(
  5660     if (PrintCMSStatistics != 0) {
  5661       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
  5662     }
  5663   )
  5664   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
  5665          "Else our work is not yet done");
  5666 }
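////////////////////////////////////////////////////////
// Aside: the steal loop above follows a fixed protocol: (1) drain the local
// queue, (2) refill from the shared overflow list, (3) steal from another
// worker, and (4) only when all of that fails, offer termination. A minimal
// schematic sketch; Task, WorkQueue, QueueSet, OverflowList and Terminator
// are hypothetical interfaces assumed for illustration, not HotSpot classes.
template <typename Task, typename WorkQueue, typename QueueSet,
          typename OverflowList, typename Terminator, typename ScanFn>
void sketch_steal_loop(int worker_id, WorkQueue* my_q, QueueSet* queues,
                       OverflowList* overflow, Terminator* term, ScanFn scan) {
  Task task;
  while (true) {
    while (my_q->pop(task)) {       // (1) always finish local work first
      scan(task);                   //     scanning may push more local work
    }
    if (overflow->refill(my_q)) {   // (2) grab a batch of global leftovers
      continue;                     //     (it may be stolen before we drain it)
    }
    if (queues->steal(worker_id, task)) {    // (3) rob a busier worker
      scan(task);
    } else if (term->offer_termination()) {  // (4) all workers idle: done
      break;
    }
  }
}
////////////////////////////////////////////////////////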
  5668 // Record object boundaries in _eden_chunk_array by sampling the eden
  5669 // top in the slow-path eden object allocation code path, recording
  5670 // the boundaries as we go, if CMSEdenChunksRecordAlways is true. If
  5671 // CMSEdenChunksRecordAlways is false, we rely instead on the
  5672 // asynchronous sampling that sample_eden() performs during the
  5673 // preclean phase.
  5674 void CMSCollector::sample_eden_chunk() {
  5675   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
  5676     if (_eden_chunk_lock->try_lock()) {
  5677       // Record a sample. This is the critical section. The contents
  5678       // of the _eden_chunk_array have to be non-decreasing in the
  5679       // address order.
  5680       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
  5681       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
  5682              "Unexpected state of Eden");
  5683       if (_eden_chunk_index == 0 ||
  5684           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
  5685            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
  5686                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
  5687         _eden_chunk_index++;  // commit sample
  5688       }
  5689       _eden_chunk_lock->unlock();
  5690     }
  5691   }
  5692 }
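////////////////////////////////////////////////////////
// Aside: a standalone sketch of the sampling rule above: record the current
// allocation top only if it strictly advances the previous sample by at
// least a minimum grain, so the array stays increasing and bounded. Names
// and the grain value are hypothetical stand-ins for the HotSpot originals.
#include <cstddef>

const size_t kSamplingGrainBytes = 4096;  // stand-in for CMSSamplingGrain

struct SketchChunkSampler {
  char** samples;   // array of recorded boundaries, sorted ascending
  size_t capacity;  // size of the samples array
  size_t index;     // number of committed samples so far

  // The slot is written first and only committed if it qualifies; otherwise
  // the next call simply overwrites the tentative entry.
  void sample(char* top) {
    if (index >= capacity) return;  // array full: drop further samples
    samples[index] = top;
    if (index == 0 ||
        (samples[index] > samples[index - 1] &&
         (size_t)(samples[index] - samples[index - 1]) >= kSamplingGrainBytes)) {
      index++;  // commit the sample
    }
  }
};
////////////////////////////////////////////////////////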
  5694 // Return a thread-local PLAB recording array, as appropriate.
  5695 void* CMSCollector::get_data_recorder(int thr_num) {
  5696   if (_survivor_plab_array != NULL &&
  5697       (CMSPLABRecordAlways ||
  5698        (_collectorState > Marking && _collectorState < FinalMarking))) {
  5699     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
  5700     ChunkArray* ca = &_survivor_plab_array[thr_num];
  5701     ca->reset();   // clear it so that fresh data is recorded
  5702     return (void*) ca;
  5703   } else {
  5704     return NULL;
  5705   }
  5706 }
  5708 // Reset all the thread-local PLAB recording arrays
  5709 void CMSCollector::reset_survivor_plab_arrays() {
  5710   for (uint i = 0; i < ParallelGCThreads; i++) {
  5711     _survivor_plab_array[i].reset();
  5712   }
  5713 }
  5715 // Merge the per-thread plab arrays into the global survivor chunk
  5716 // array which will provide the partitioning of the survivor space
  5717 // for CMS initial scan and rescan.
  5718 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
  5719                                               int no_of_gc_threads) {
  5720   assert(_survivor_plab_array  != NULL, "Error");
  5721   assert(_survivor_chunk_array != NULL, "Error");
  5722   assert(_collectorState == FinalMarking ||
  5723          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
  5724   for (int j = 0; j < no_of_gc_threads; j++) {
  5725     _cursor[j] = 0;
  5726   }
  5727   HeapWord* top = surv->top();
  5728   size_t i;
  5729   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
  5730     HeapWord* min_val = top;          // Higher than any PLAB address
  5731     uint      min_tid = 0;            // position of min_val this round
  5732     for (int j = 0; j < no_of_gc_threads; j++) {
  5733       ChunkArray* cur_sca = &_survivor_plab_array[j];
  5734       if (_cursor[j] == cur_sca->end()) {
  5735         continue;
  5736       }
  5737       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
  5738       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
  5739       assert(surv->used_region().contains(cur_val), "Out of bounds value");
  5740       if (cur_val < min_val) {
  5741         min_tid = j;
  5742         min_val = cur_val;
  5743       } else {
  5744         assert(cur_val < top, "All recorded addresses should be less");
  5745       }
  5746     }
  5747     // At this point min_val and min_tid are respectively
  5748     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
  5749     // and the thread (j) that witnesses that address.
  5750     // We record this address in the _survivor_chunk_array[i]
  5751     // and increment _cursor[min_tid] prior to the next round i.
  5752     if (min_val == top) {
  5753       break;
  5754     }
  5755     _survivor_chunk_array[i] = min_val;
  5756     _cursor[min_tid]++;
  5757   }
  5758   // We are all done; record the size of the _survivor_chunk_array
  5759   _survivor_chunk_index = i; // exclusive: [0, i)
  5760   if (PrintCMSStatistics > 0) {
  5761     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
  5762   }
  5763   // Verify that we used up all the recorded entries
  5764   #ifdef ASSERT
  5765     size_t total = 0;
  5766     for (int j = 0; j < no_of_gc_threads; j++) {
  5767       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
  5768       total += _cursor[j];
  5769     }
  5770     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
  5771     // Check that the merged array is in sorted order
  5772     if (total > 0) {
  5773       for (size_t i = 0; i < total - 1; i++) {
  5774         if (PrintCMSStatistics > 0) {
  5775           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
  5776                               i, _survivor_chunk_array[i]);
  5777         }
  5778         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
  5779                "Not sorted");
  5780       }
  5781     }
  5782   #endif // ASSERT
  5783 }
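////////////////////////////////////////////////////////
// Aside: merge_survivor_plab_arrays() above is a classic k-way merge. Each
// per-thread array is already sorted, and each round consumes the smallest
// un-consumed head among all arrays. A standalone sketch with std::vector;
// where the real code uses the space's top() as the "larger than everything"
// sentinel, this sketch uses NULL for "nothing found".
#include <cstddef>
#include <vector>

inline std::vector<char*> sketch_k_way_merge(
    const std::vector<std::vector<char*> >& per_thread) {
  std::vector<size_t> cursor(per_thread.size(), 0);
  std::vector<char*> merged;
  while (true) {
    char*  min_val = NULL;
    size_t min_tid = 0;
    for (size_t j = 0; j < per_thread.size(); j++) {
      if (cursor[j] == per_thread[j].size()) continue;  // array j exhausted
      char* cur = per_thread[j][cursor[j]];
      if (min_val == NULL || cur < min_val) { min_val = cur; min_tid = j; }
    }
    if (min_val == NULL) break;  // every cursor at its end: merge complete
    merged.push_back(min_val);   // like _survivor_chunk_array[i] = min_val
    cursor[min_tid]++;           // like _cursor[min_tid]++
  }
  return merged;
}
////////////////////////////////////////////////////////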
  5785 // Set up the space's par_seq_tasks structure for work claiming
  5786 // for parallel initial scan and rescan of young gen.
  5787 // See ParRescanTask where this is currently used.
  5788 void
  5789 CMSCollector::
  5790 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
  5791   assert(n_threads > 0, "Unexpected n_threads argument");
  5792   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
  5794   // Eden space
  5795   if (!dng->eden()->is_empty()) {
  5796     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
  5797     assert(!pst->valid(), "Clobbering existing data?");
  5798     // Each valid entry in [0, _eden_chunk_index) represents a task.
  5799     size_t n_tasks = _eden_chunk_index + 1;
  5800     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
  5801     // Sets the condition for completion of the subtask (how many threads
  5802     // need to finish in order to be done).
  5803     pst->set_n_threads(n_threads);
  5804     pst->set_n_tasks((int)n_tasks);
  5805   }
  5807   // Merge the survivor plab arrays into _survivor_chunk_array
  5808   if (_survivor_plab_array != NULL) {
  5809     merge_survivor_plab_arrays(dng->from(), n_threads);
  5810   } else {
  5811     assert(_survivor_chunk_index == 0, "Error");
  5812   }
  5814   // To space
  5815   {
  5816     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
  5817     assert(!pst->valid(), "Clobbering existing data?");
  5818     // Sets the condition for completion of the subtask (how many threads
  5819     // need to finish in order to be done).
  5820     pst->set_n_threads(n_threads);
  5821     pst->set_n_tasks(1);
  5822     assert(pst->valid(), "Error");
  5823   }
  5825   // From space
  5826   {
  5827     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
  5828     assert(!pst->valid(), "Clobbering existing data?");
  5829     size_t n_tasks = _survivor_chunk_index + 1;
  5830     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
  5831     // Sets the condition for completion of the subtask (how many threads
  5832     // need to finish in order to be done).
  5833     pst->set_n_threads(n_threads);
  5834     pst->set_n_tasks((int)n_tasks);
  5835     assert(pst->valid(), "Error");
  5836   }
  5837 }
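////////////////////////////////////////////////////////
// Aside: the SequentialSubTasksDone protocol used above boils down to an
// atomic ticket counter: each worker repeatedly claims the next unclaimed
// task index until all n_tasks tickets are gone. A minimal sketch of that
// shape (hypothetical; the real class also tracks per-thread completion so
// all_tasks_completed() can reset the structure for reuse):
#include <atomic>
#include <cstddef>

struct SketchSubTasks {
  std::atomic<size_t> next_task;
  size_t n_tasks;

  explicit SketchSubTasks(size_t n) : next_task(0), n_tasks(n) {}

  // Returns false and sets *task while tasks remain, true once exhausted,
  // loosely mirroring is_task_claimed()'s "already claimed" sense.
  bool is_task_claimed(size_t* task) {
    size_t t = next_task.fetch_add(1);
    if (t >= n_tasks) return true;  // nothing left to claim
    *task = t;
    return false;
  }
};

// Usage: size_t t; while (!pst.is_task_claimed(&t)) { /* do task t */ }
////////////////////////////////////////////////////////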
  5839 // Parallel version of remark
  5840 void CMSCollector::do_remark_parallel() {
  5841   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5842   FlexibleWorkGang* workers = gch->workers();
  5843   assert(workers != NULL, "Need parallel worker threads.");
  5844   // Choose to use the number of GC workers most recently set
  5845   // into "active_workers".  If active_workers is not set, set it
  5846   // to ParallelGCThreads.
  5847   int n_workers = workers->active_workers();
  5848   if (n_workers == 0) {
  5849     assert(n_workers > 0, "Should have been set during scavenge");
  5850     n_workers = ParallelGCThreads;
  5851     workers->set_active_workers(n_workers);
  5852   }
  5853   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  5855   CMSParRemarkTask tsk(this,
  5856     cms_space,
  5857     n_workers, workers, task_queues());
  5859   // Set up for parallel process_strong_roots work.
  5860   gch->set_par_threads(n_workers);
  5861   // We won't be iterating over the cards in the card table updating
  5862   // the younger_gen cards, so we shouldn't call the following else
  5863   // the verification code as well as subsequent younger_refs_iterate
  5864   // code would get confused. XXX
  5865   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
  5867   // The young gen rescan work will not be done as part of
  5868   // process_strong_roots (which currently doesn't know how to
  5869   // parallelize such a scan), but rather will be broken up into
  5870   // a set of parallel tasks (via the sampling that the [abortable]
  5871   // preclean phase did of EdenSpace, plus the [two] tasks of
  5872   // scanning the [two] survivor spaces). Further fine-grain
  5873   // parallelization of the scanning of the survivor spaces
  5874   // themselves, and of precleaning of the younger gen itself
  5875   // is deferred to the future.
  5876   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
  5878   // The dirty card rescan work is broken up into a "sequence"
  5879   // of parallel tasks (per constituent space) that are dynamically
  5880   // claimed by the parallel threads.
  5881   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
  5883   // It turns out that even when we're using 1 thread, doing the work in a
  5884   // separate thread causes wide variance in run times.  We can't help this
  5885   // in the multi-threaded case, but we special-case n=1 here to get
  5886   // repeatable measurements of the 1-thread overhead of the parallel code.
  5887   if (n_workers > 1) {
  5888     // Make refs discovery MT-safe, if it isn't already: it may not
  5889     // necessarily be so, since it's possible that we are doing
  5890     // ST marking.
  5891     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
  5892     GenCollectedHeap::StrongRootsScope srs(gch);
  5893     workers->run_task(&tsk);
  5894   } else {
  5895     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
  5896     GenCollectedHeap::StrongRootsScope srs(gch);
  5897     tsk.work(0);
  5898   }
  5900   gch->set_par_threads(0);  // 0 ==> non-parallel.
  5901   // restore, single-threaded for now, any preserved marks
  5902   // as a result of work_q overflow
  5903   restore_preserved_marks_if_any();
  5904 }
  5906 // Non-parallel version of remark
  5907 void CMSCollector::do_remark_non_parallel() {
  5908   ResourceMark rm;
  5909   HandleMark   hm;
  5910   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5911   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
  5913   MarkRefsIntoAndScanClosure
  5914     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
  5915              &_markStack, this,
  5916              false /* should_yield */, false /* not precleaning */);
  5917   MarkFromDirtyCardsClosure
  5918     markFromDirtyCardsClosure(this, _span,
  5919                               NULL,  // space is set further below
  5920                               &_markBitMap, &_markStack, &mrias_cl);
  5921   {
  5922     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
  5923     // Iterate over the dirty cards, setting the corresponding bits in the
  5924     // mod union table.
  5925     {
  5926       ModUnionClosure modUnionClosure(&_modUnionTable);
  5927       _ct->ct_bs()->dirty_card_iterate(
  5928                       _cmsGen->used_region(),
  5929                       &modUnionClosure);
  5930     }
  5931     // Having transferred these marks into the modUnionTable, we just need
  5932     // to rescan the marked objects on the dirty cards in the modUnionTable.
  5933     // The initial marking may have been done during an asynchronous
  5934     // collection so there may be dirty bits in the mod-union table.
  5935     const int alignment =
  5936       CardTableModRefBS::card_size * BitsPerWord;
  5937     {
  5938       // ... First handle dirty cards in CMS gen
  5939       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
  5940       MemRegion ur = _cmsGen->used_region();
  5941       HeapWord* lb = ur.start();
  5942       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
  5943       MemRegion cms_span(lb, ub);
  5944       _modUnionTable.dirty_range_iterate_clear(cms_span,
  5945                                                &markFromDirtyCardsClosure);
  5946       verify_work_stacks_empty();
  5947       if (PrintCMSStatistics != 0) {
  5948         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
  5949           markFromDirtyCardsClosure.num_dirty_cards());
  5950       }
  5951     }
  5952   }
  5953   if (VerifyDuringGC &&
  5954       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  5955     HandleMark hm;  // Discard invalid handles created during verification
  5956     Universe::verify();
  5957   }
  5958   {
  5959     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
  5961     verify_work_stacks_empty();
  5963     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  5964     GenCollectedHeap::StrongRootsScope srs(gch);
  5965     gch->gen_process_strong_roots(_cmsGen->level(),
  5966                                   true,  // younger gens as roots
  5967                                   false, // use the local StrongRootsScope
  5968                                   false, // not scavenging
  5969                                   SharedHeap::ScanningOption(roots_scanning_options()),
  5970                                   &mrias_cl,
  5971                                   true,   // walk code active on stacks
  5972                                   NULL,
  5973                                   NULL);  // The dirty klasses will be handled below
  5975     assert(should_unload_classes()
  5976            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
  5977            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
  5978   }
  5980   {
  5981     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
  5983     verify_work_stacks_empty();
  5985     // Scan all class loader data objects that might have been introduced
  5986     // during concurrent marking.
  5987     ResourceMark rm;
  5988     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
  5989     for (int i = 0; i < array->length(); i++) {
  5990       mrias_cl.do_class_loader_data(array->at(i));
  5991     }
  5993     // We don't need to keep track of new CLDs anymore.
  5994     ClassLoaderDataGraph::remember_new_clds(false);
  5996     verify_work_stacks_empty();
  5997   }
  5999   {
  6000     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
  6002     verify_work_stacks_empty();
  6004     RemarkKlassClosure remark_klass_closure(&mrias_cl);
  6005     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
  6007     verify_work_stacks_empty();
  6008   }
  6010   // We might have added oops to ClassLoaderData::_handles during the
  6011   // concurrent marking phase. These oops point to newly allocated objects
  6012   // that are guaranteed to be kept alive, either by the direct allocation
  6013   // code or when the young collector processes the strong roots. Hence,
  6014   // we don't have to revisit the _handles block during the remark phase.
  6016   verify_work_stacks_empty();
  6017   // Restore evacuated mark words, if any, used for overflow list links
  6018   if (!CMSOverflowEarlyRestoration) {
  6019     restore_preserved_marks_if_any();
  6020   }
  6021   verify_overflow_empty();
  6022 }
  6024 ////////////////////////////////////////////////////////
  6025 // Parallel Reference Processing Task Proxy Class
  6026 ////////////////////////////////////////////////////////
  6027 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
  6028   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  6029   CMSCollector*          _collector;
  6030   CMSBitMap*             _mark_bit_map;
  6031   const MemRegion        _span;
  6032   ProcessTask&           _task;
  6034 public:
  6035   CMSRefProcTaskProxy(ProcessTask&     task,
  6036                       CMSCollector*    collector,
  6037                       const MemRegion& span,
  6038                       CMSBitMap*       mark_bit_map,
  6039                       AbstractWorkGang* workers,
  6040                       OopTaskQueueSet* task_queues):
  6041     // XXX Should superclass AGTWOQ also know about AWG since it knows
  6042     // about the task_queues used by the AWG? Then it could initialize
  6043     // the terminator() object. See 6984287. The set_for_termination()
  6044     // below is a temporary band-aid for the regression in 6984287.
  6045     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
  6046       task_queues),
  6047     _task(task),
  6048     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
  6049   {
  6050     assert(_collector->_span.equals(_span) && !_span.is_empty(),
  6051            "Inconsistency in _span");
  6052     set_for_termination(workers->active_workers());
  6053   }
  6055   OopTaskQueueSet* task_queues() { return queues(); }
  6057   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
  6059   void do_work_steal(int i,
  6060                      CMSParDrainMarkingStackClosure* drain,
  6061                      CMSParKeepAliveClosure* keep_alive,
  6062                      int* seed);
  6064   virtual void work(uint worker_id);
  6065 };
  6067 void CMSRefProcTaskProxy::work(uint worker_id) {
  6068   assert(_collector->_span.equals(_span), "Inconsistency in _span");
  6069   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
  6070                                         _mark_bit_map,
  6071                                         work_queue(worker_id));
  6072   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
  6073                                                  _mark_bit_map,
  6074                                                  work_queue(worker_id));
  6075   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
  6076   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
  6077   if (_task.marks_oops_alive()) {
  6078     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
  6079                   _collector->hash_seed(worker_id));
  6080   }
  6081   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
  6082   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
  6083 }
  6085 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
  6086   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  6087   EnqueueTask& _task;
  6089 public:
  6090   CMSRefEnqueueTaskProxy(EnqueueTask& task)
  6091     : AbstractGangTask("Enqueue reference objects in parallel"),
  6092       _task(task)
  6093   { }
  6095   virtual void work(uint worker_id)
  6096   {
  6097     _task.work(worker_id);
  6098   }
  6099 };
  6101 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
  6102   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
  6103    _span(span),
  6104    _bit_map(bit_map),
  6105    _work_queue(work_queue),
  6106    _mark_and_push(collector, span, bit_map, work_queue),
  6107    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
  6108                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
  6109 { }
  6111 // . see if we can share work_queues with ParNew? XXX
  6112 void CMSRefProcTaskProxy::do_work_steal(int i,
  6113   CMSParDrainMarkingStackClosure* drain,
  6114   CMSParKeepAliveClosure* keep_alive,
  6115   int* seed) {
  6116   OopTaskQueue* work_q = work_queue(i);
  6117   NOT_PRODUCT(int num_steals = 0;)
  6118   oop obj_to_scan;
  6120   while (true) {
  6121     // Completely finish any left over work from (an) earlier round(s)
  6122     drain->trim_queue(0);
  6123     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
  6124                                          (size_t)ParGCDesiredObjsFromOverflowList);
  6125     // Now check if there's any work in the overflow list
  6126     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
  6127     // only affects the number of attempts made to get work from the
  6128     // overflow list and does not affect the number of workers.  Just
  6129     // pass ParallelGCThreads so this behavior is unchanged.
  6130     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
  6131                                                 work_q,
  6132                                                 ParallelGCThreads)) {
  6133       // Found something in global overflow list;
  6134       // not yet ready to go stealing work from others.
  6135       // We'd like to assert(work_q->size() != 0, ...)
  6136       // because we just took work from the overflow list,
  6137       // but of course we can't, since all of that might have
  6138       // been already stolen from us.
  6139       continue;
  6140     }
  6141     // Verify that we have no work before we resort to stealing
  6142     assert(work_q->size() == 0, "Have work, shouldn't steal");
  6143     // Try to steal from other queues that have work
  6144     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
  6145       NOT_PRODUCT(num_steals++;)
  6146       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
  6147       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
  6148       // Do scanning work
  6149       obj_to_scan->oop_iterate(keep_alive);
  6150       // Loop around, finish this work, and try to steal some more
  6151     } else if (terminator()->offer_termination()) {
  6152       break;  // nirvana from the infinite cycle
  6153     }
  6154   }
  6155   NOT_PRODUCT(
  6156     if (PrintCMSStatistics != 0) {
  6157       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
  6158     }
  6159   )
  6160 }
  6162 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
  6163 {
  6164   GenCollectedHeap* gch = GenCollectedHeap::heap();
  6165   FlexibleWorkGang* workers = gch->workers();
  6166   assert(workers != NULL, "Need parallel worker threads.");
  6167   CMSRefProcTaskProxy rp_task(task, &_collector,
  6168                               _collector.ref_processor()->span(),
  6169                               _collector.markBitMap(),
  6170                               workers, _collector.task_queues());
  6171   workers->run_task(&rp_task);
  6172 }
  6174 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
  6175 {
  6177   GenCollectedHeap* gch = GenCollectedHeap::heap();
  6178   FlexibleWorkGang* workers = gch->workers();
  6179   assert(workers != NULL, "Need parallel worker threads.");
  6180   CMSRefEnqueueTaskProxy enq_task(task);
  6181   workers->run_task(&enq_task);
  6182 }
  6184 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
  6186   ResourceMark rm;
  6187   HandleMark   hm;
  6189   ReferenceProcessor* rp = ref_processor();
  6190   assert(rp->span().equals(_span), "Spans should be equal");
  6191   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  6192   // Process weak references.
  6193   rp->setup_policy(clear_all_soft_refs);
  6194   verify_work_stacks_empty();
  6196   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
  6197                                           &_markStack, false /* !preclean */);
  6198   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
  6199                                 _span, &_markBitMap, &_markStack,
  6200                                 &cmsKeepAliveClosure, false /* !preclean */);
  6201   {
  6202     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
  6204     ReferenceProcessorStats stats;
  6205     if (rp->processing_is_mt()) {
  6206       // Set the degree of MT here.  If the discovery is done MT, there
  6207       // may have been a different number of threads doing the discovery
  6208       // and a different number of discovered lists may have Ref objects.
  6209       // That is OK as long as the Reference lists are balanced (see
  6210       // balance_all_queues() and balance_queues()).
  6211       GenCollectedHeap* gch = GenCollectedHeap::heap();
  6212       int active_workers = ParallelGCThreads;
  6213       FlexibleWorkGang* workers = gch->workers();
  6214       if (workers != NULL) {
  6215         active_workers = workers->active_workers();
  6216         // The expectation is that active_workers will have already
  6217         // been set to a reasonable value.  If it has not been set,
  6218         // investigate.
  6219         assert(active_workers > 0, "Should have been set during scavenge");
  6220       }
  6221       rp->set_active_mt_degree(active_workers);
  6222       CMSRefProcTaskExecutor task_executor(*this);
  6223       stats = rp->process_discovered_references(&_is_alive_closure,
  6224                                         &cmsKeepAliveClosure,
  6225                                         &cmsDrainMarkingStackClosure,
  6226                                         &task_executor,
  6227                                         _gc_timer_cm);
  6228     } else {
  6229       stats = rp->process_discovered_references(&_is_alive_closure,
  6230                                         &cmsKeepAliveClosure,
  6231                                         &cmsDrainMarkingStackClosure,
  6232                                         NULL,
  6233                                         _gc_timer_cm);
  6234     }
  6235     _gc_tracer_cm->report_gc_reference_stats(stats);
  6236   }
  6239   // This is the point where the entire marking should have completed.
  6240   verify_work_stacks_empty();
  6242   if (should_unload_classes()) {
  6243     {
  6244       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
  6246       // Unload classes and purge the SystemDictionary.
  6247       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
  6249       // Unload nmethods.
  6250       CodeCache::do_unloading(&_is_alive_closure, purged_class);
  6252       // Prune dead klasses from subklass/sibling/implementor lists.
  6253       Klass::clean_weak_klass_links(&_is_alive_closure);
  6254     }
  6256     {
  6257       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
  6258       // Clean up unreferenced symbols in symbol table.
  6259       SymbolTable::unlink();
  6260     }
  6261   }
  6263   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
  6264   // Need to check if we really scanned the StringTable.
  6265   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
  6266     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
  6267     // Delete entries for dead interned strings.
  6268     StringTable::unlink(&_is_alive_closure);
  6269   }
  6271   // Restore any preserved marks as a result of mark stack or
  6272   // work queue overflow
  6273   restore_preserved_marks_if_any();  // done single-threaded for now
  6275   rp->set_enqueuing_is_done(true);
  6276   if (rp->processing_is_mt()) {
  6277     rp->balance_all_queues();
  6278     CMSRefProcTaskExecutor task_executor(*this);
  6279     rp->enqueue_discovered_references(&task_executor);
  6280   } else {
  6281     rp->enqueue_discovered_references(NULL);
  6282   }
  6283   rp->verify_no_references_recorded();
  6284   assert(!rp->discovery_enabled(), "should have been disabled");
  6285 }
  6287 #ifndef PRODUCT
  6288 void CMSCollector::check_correct_thread_executing() {
  6289   Thread* t = Thread::current();
  6290   // Only the VM thread or the CMS thread should be here.
  6291   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
  6292          "Unexpected thread type");
  6293   // If this is the vm thread, the foreground process
  6294   // should not be waiting.  Note that _foregroundGCIsActive is
  6295   // true while the foreground collector is waiting.
  6296   if (_foregroundGCShouldWait) {
  6297     // We cannot be the VM thread
  6298     assert(t->is_ConcurrentGC_thread(),
  6299            "Should be CMS thread");
  6300   } else {
  6301     // We can be the CMS thread only if we are in a stop-world
  6302     // phase of CMS collection.
  6303     if (t->is_ConcurrentGC_thread()) {
  6304       assert(_collectorState == InitialMarking ||
  6305              _collectorState == FinalMarking,
  6306              "Should be a stop-world phase");
  6307       // The CMS thread should be holding the CMS_token.
  6308       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6309              "Potential interference with concurrently "
  6310              "executing VM thread");
  6311     }
  6312   }
  6313 }
  6314 #endif
  6316 void CMSCollector::sweep(bool asynch) {
  6317   assert(_collectorState == Sweeping, "just checking");
  6318   check_correct_thread_executing();
  6319   verify_work_stacks_empty();
  6320   verify_overflow_empty();
  6321   increment_sweep_count();
  6322   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
  6324   _inter_sweep_timer.stop();
  6325   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  6326   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
  6328   assert(!_intra_sweep_timer.is_active(), "Should not be active");
  6329   _intra_sweep_timer.reset();
  6330   _intra_sweep_timer.start();
  6331   if (asynch) {
  6332     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  6333     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
  6334     // First sweep the old gen
  6336       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
  6337                                bitMapLock());
  6338       sweepWork(_cmsGen, asynch);
  6341     // Update Universe::_heap_*_at_gc figures.
  6342     // We need all the free list locks to make the abstract state
  6343     // transition from Sweeping to Resetting. See detailed note
  6344     // further below.
  6346       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
  6347       // Update heap occupancy information which is used as
  6348       // input to soft ref clearing policy at the next gc.
  6349       Universe::update_heap_info_at_gc();
  6350       _collectorState = Resizing;
  6352   } else {
  6353     // already have needed locks
  6354     sweepWork(_cmsGen, asynch);
  6355     // Update heap occupancy information which is used as
  6356     // input to soft ref clearing policy at the next gc.
  6357     Universe::update_heap_info_at_gc();
  6358     _collectorState = Resizing;
  6360   verify_work_stacks_empty();
  6361   verify_overflow_empty();
  6363   if (should_unload_classes()) {
  6364     ClassLoaderDataGraph::purge();
  6367   _intra_sweep_timer.stop();
  6368   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
  6370   _inter_sweep_timer.reset();
  6371   _inter_sweep_timer.start();
  6373   // We need to use a monotonically non-decreasing time in ms
  6374   // or we will see time-warp warnings; os::javaTimeMillis()
  6375   // does not guarantee monotonicity.
  6376   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  6377   update_time_of_last_gc(now);
  6379   // NOTE on abstract state transitions:
  6380   // Mutators allocate-live and/or mark the mod-union table dirty
  6381   // based on the state of the collection.  The former is done in
  6382   // the interval [Marking, Sweeping] and the latter in the interval
  6383   // [Marking, Sweeping).  Thus the transitions into the Marking state
  6384   // and out of the Sweeping state must be synchronously visible
  6385   // globally to the mutators.
  6386   // The transition into the Marking state happens with the world
  6387   // stopped so the mutators will globally see it.  Sweeping is
  6388   // done asynchronously by the background collector so the transition
  6389   // from the Sweeping state to the Resizing state must be done
  6390   // under the freelistLock (as is the check for whether to
  6391   // allocate-live and whether to dirty the mod-union table).
  6392   assert(_collectorState == Resizing, "Change of collector state to"
  6393     " Resizing must be done under the freelistLocks (plural)");
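         // In outline, the discipline described in the note above is (a
         // sketch only, with simplified names):
         //
         //   mutator allocation path:         background sweeper:
         //     freelistLock()->lock();          freelistLock()->lock();
         //     if (state in [Marking,           _collectorState = Resizing;
         //         Sweeping]) allocate-live;    freelistLock()->unlock();
         //     freelistLock()->unlock();
         //
         // so a mutator holding the lock sees either Sweeping (and marks the
         // block live) or Resizing (and need not), never a torn state.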
  6395   // Now that sweeping has been completed, we clear
  6396   // the incremental_collection_failed flag,
  6397   // thus inviting a younger gen collection to promote into
  6398   // this generation. If such a promotion may still fail,
  6399   // the flag will be set again when a young collection is
  6400   // attempted.
  6401   GenCollectedHeap* gch = GenCollectedHeap::heap();
  6402   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
  6403   gch->update_full_collections_completed(_collection_count_start);
  6406 // FIX ME!!! Looks like this belongs in CFLSpace, with
  6407 // CMSGen merely delegating to it.
  6408 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
  6409   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
  6410   HeapWord*  minAddr        = _cmsSpace->bottom();
  6411   HeapWord*  largestAddr    =
  6412     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
  6413   if (largestAddr == NULL) {
  6414     // The dictionary appears to be empty.  In this case
  6415     // try to coalesce at the end of the heap.
  6416     largestAddr = _cmsSpace->end();
  6418   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
  6419   size_t nearLargestOffset =
  6420     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  6421   if (PrintFLSStatistics != 0) {
  6422     gclog_or_tty->print_cr(
  6423       "CMS: Large Block: " PTR_FORMAT ";"
  6424       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
  6425       largestAddr,
  6426       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
  6428   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
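       // For example, with FLSLargestBlockCoalesceProximity at (say) 0.99,
       // if the largest free block begins 1,000,000 words past bottom(),
       // sweep-time coalescing becomes aggressive from roughly word offset
       // 990,000 - MinChunkSize onwards (numbers illustrative only).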
  6431 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
  6432   return addr >= _cmsSpace->nearLargestChunk();
  6435 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
  6436   return _cmsSpace->find_chunk_at_end();
  6439 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
  6440                                                     bool full) {
  6441   // The next lower level has been collected.  Gather any statistics
  6442   // that are of interest at this point.
  6443   if (!full && (current_level + 1) == level()) {
  6444     // Gather statistics on the young generation collection.
  6445     collector()->stats().record_gc0_end(used());
  6449 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
  6450   GenCollectedHeap* gch = GenCollectedHeap::heap();
  6451   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
  6452     "Wrong type of heap");
  6453   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
  6454     gch->gen_policy()->size_policy();
  6455   assert(sp->is_gc_cms_adaptive_size_policy(),
  6456     "Wrong type of size policy");
  6457   return sp;
  6460 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  6461   if (PrintGCDetails && Verbose) {
  6462     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
  6464   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
  6465   _debug_collection_type =
  6466     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
  6467   if (PrintGCDetails && Verbose) {
  6468     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
  6472 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
  6473   bool asynch) {
  6474   // We iterate over the space(s) underlying this generation,
  6475   // checking the mark bit map to see if the bits corresponding
  6476   // to specific blocks are marked or not. Blocks that are
  6477   // marked are live and are not swept up. All remaining blocks
  6478   // are swept up, with coalescing on-the-fly as we sweep up
  6479   // contiguous free and/or garbage blocks:
  6480   // We need to ensure that the sweeper synchronizes with allocators
  6481   // and stop-the-world collectors. In particular, the following
  6482   // locks are used:
  6483   // . CMS token: if this is held, a stop the world collection cannot occur
  6484   // . freelistLock: if this is held no allocation can occur from this
  6485   //                 generation by another thread
  6486   // . bitMapLock: if this is held, no other thread can access or update
  6487   //               the marking bit map
  6489   // Note that we need to hold the freelistLock if we use
  6490   // block iterate below; else the iterator might go awry if
  6491   // a mutator (or promotion) causes block contents to change
  6492   // (for instance if the allocator divvies up a block).
  6493   // If we hold the free list lock, for all practical purposes
  6494   // young generation GC's can't occur (they'll usually need to
  6495   // promote), so we might as well prevent all young generation
  6496   // GC's while we do a sweeping step. For the same reason, we might
  6497   // as well take the bit map lock for the entire duration.
  6499   // check that we hold the requisite locks
  6500   assert(have_cms_token(), "Should hold cms token");
  6501   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
  6502          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
  6503         "Should possess CMS token to sweep");
  6504   assert_lock_strong(gen->freelistLock());
  6505   assert_lock_strong(bitMapLock());
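         // (In the asynchronous case our caller, CMSCollector::sweep(),
         // acquired these locks via CMSTokenSyncWithLocks before calling us;
         // in the synchronous case the VM thread already holds them.)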
  6507   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
  6508   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
  6509   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
  6510                                       _inter_sweep_estimate.padded_average(),
  6511                                       _intra_sweep_estimate.padded_average());
  6512   gen->setNearLargestChunk();
  6515     SweepClosure sweepClosure(this, gen, &_markBitMap,
  6516                             CMSYield && asynch);
  6517     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
  6518     // We need to free-up/coalesce garbage/blocks from a
  6519     // co-terminal free run. This is done in the SweepClosure
  6520     // destructor; so, do not remove this scope, else the
  6521     // end-of-sweep-census below will be off by a little bit.
  6523   gen->cmsSpace()->sweep_completed();
  6524   gen->cmsSpace()->endSweepFLCensus(sweep_count());
  6525   if (should_unload_classes()) {                // unloaded classes this cycle,
  6526     _concurrent_cycles_since_last_unload = 0;   // ... reset count
  6527   } else {                                      // did not unload classes,
  6528     _concurrent_cycles_since_last_unload++;     // ... increment count
  6532 // Reset CMS data structures (for now just the marking bit map)
  6533 // preparatory for the next cycle.
  6534 void CMSCollector::reset(bool asynch) {
  6535   GenCollectedHeap* gch = GenCollectedHeap::heap();
  6536   CMSAdaptiveSizePolicy* sp = size_policy();
  6537   AdaptiveSizePolicyOutput(sp, gch->total_collections());
  6538   if (asynch) {
  6539     CMSTokenSyncWithLocks ts(true, bitMapLock());
  6541     // If the state is not "Resetting", the foreground thread
  6542     // has done a collection and the resetting.
  6543     if (_collectorState != Resetting) {
  6544       assert(_collectorState == Idling, "The state should only change"
  6545         " because the foreground collector has finished the collection");
  6546       return;
  6549     // Clear the mark bitmap (no grey objects to start with)
  6550     // for the next cycle.
  6551     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  6552     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
  6554     HeapWord* curAddr = _markBitMap.startWord();
  6555     while (curAddr < _markBitMap.endWord()) {
  6556       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
  6557       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
  6558       _markBitMap.clear_large_range(chunk);
  6559       if (ConcurrentMarkSweepThread::should_yield() &&
  6560           !foregroundGCIsActive() &&
  6561           CMSYield) {
  6562         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6563                "CMS thread should hold CMS token");
  6564         assert_lock_strong(bitMapLock());
  6565         bitMapLock()->unlock();
  6566         ConcurrentMarkSweepThread::desynchronize(true);
  6567         ConcurrentMarkSweepThread::acknowledge_yield_request();
  6568         stopTimer();
  6569         if (PrintCMSStatistics != 0) {
  6570           incrementYields();
  6572         icms_wait();
  6574         // See the comment in coordinator_yield()
  6575         for (unsigned i = 0; i < CMSYieldSleepCount &&
  6576                          ConcurrentMarkSweepThread::should_yield() &&
  6577                          !CMSCollector::foregroundGCIsActive(); ++i) {
  6578           os::sleep(Thread::current(), 1, false);
  6579           ConcurrentMarkSweepThread::acknowledge_yield_request();
  6582         ConcurrentMarkSweepThread::synchronize(true);
  6583         bitMapLock()->lock_without_safepoint_check();
  6584         startTimer();
  6586       curAddr = chunk.end();
  6588     // A successful mostly concurrent collection has been done.
  6589     // Because only the full (i.e., concurrent mode failure) collections
  6590     // are being measured for gc overhead limits, clean the "near" flag
  6591     // and count.
  6592     sp->reset_gc_overhead_limit_count();
  6593     _collectorState = Idling;
  6594   } else {
  6595     // already have the lock
  6596     assert(_collectorState == Resetting, "just checking");
  6597     assert_lock_strong(bitMapLock());
  6598     _markBitMap.clear_all();
  6599     _collectorState = Idling;
  6602   // Stop incremental mode after a cycle completes, so that any future cycles
  6603   // are triggered by allocation.
  6604   stop_icms();
  6606   NOT_PRODUCT(
  6607     if (RotateCMSCollectionTypes) {
  6608       _cmsGen->rotate_debug_collection_type();
  6612   register_gc_end();
  6615 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  6616   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  6617   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  6618   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
  6619   TraceCollectorStats tcs(counters());
  6621   switch (op) {
  6622     case CMS_op_checkpointRootsInitial: {
  6623       SvcGCMarker sgcm(SvcGCMarker::OTHER);
  6624       checkpointRootsInitial(true);       // asynch
  6625       if (PrintGC) {
  6626         _cmsGen->printOccupancy("initial-mark");
  6628       break;
  6630     case CMS_op_checkpointRootsFinal: {
  6631       SvcGCMarker sgcm(SvcGCMarker::OTHER);
  6632       checkpointRootsFinal(true,    // asynch
  6633                            false,   // !clear_all_soft_refs
  6634                            false);  // !init_mark_was_synchronous
  6635       if (PrintGC) {
  6636         _cmsGen->printOccupancy("remark");
  6638       break;
  6640     default:
  6641       fatal("No such CMS_op");
  6645 #ifndef PRODUCT
  6646 size_t const CMSCollector::skip_header_HeapWords() {
  6647   return FreeChunk::header_size();
  6650 // Try and collect here conditions that should hold when
  6651 // CMS thread is exiting. The idea is that the foreground GC
  6652 // thread should not be blocked if it wants to terminate
  6653 // the CMS thread and yet continue to run the VM for a while
  6654 // after that.
  6655 void CMSCollector::verify_ok_to_terminate() const {
  6656   assert(Thread::current()->is_ConcurrentGC_thread(),
  6657          "should be called by CMS thread");
  6658   assert(!_foregroundGCShouldWait, "should be false");
  6659   // We could check here that all the various low-level locks
  6660   // are not held by the CMS thread, but that is overkill; see
  6661   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
  6662   // is checked.
  6664 #endif
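       // Printezis marks ("P-bits") let us size a block whose header may not
       // yet be readable: for such a block the bits at addr and addr+1 are
       // both set, and a third bit is set at addr+size-1.  Schematically,
       // for a block of 5 heap words starting at addr:
       //
       //   word:  addr  addr+1  addr+2  addr+3  addr+4
       //   bit :    1      1       0       0       1
       //
       // so size = pointer_delta((addr+4) + 1, addr) = 5.  This is also why
       // the routines below assert size >= 3: the end bit at addr+size-1
       // must lie at or beyond addr+2 to be distinct from the first two.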
  6666 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  6667   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
  6668          "missing Printezis mark?");
  6669   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  6670   size_t size = pointer_delta(nextOneAddr + 1, addr);
  6671   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  6672          "alignment problem");
  6673   assert(size >= 3, "Necessary for Printezis marks to work");
  6674   return size;
  6677 // A variant of the above (block_size_using_printezis_bits()) except
  6678 // that we return 0 if the P-bits are not yet set.
  6679 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
  6680   if (_markBitMap.isMarked(addr + 1)) {
  6681     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
  6682     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  6683     size_t size = pointer_delta(nextOneAddr + 1, addr);
  6684     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  6685            "alignment problem");
  6686     assert(size >= 3, "Necessary for Printezis marks to work");
  6687     return size;
  6689   return 0;
  6692 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
  6693   size_t sz = 0;
  6694   oop p = (oop)addr;
  6695   if (p->klass_or_null() != NULL) {
  6696     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
  6697   } else {
  6698     sz = block_size_using_printezis_bits(addr);
  6700   assert(sz > 0, "size must be nonzero");
  6701   HeapWord* next_block = addr + sz;
  6702   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
  6703                                              CardTableModRefBS::card_size);
  6704   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
  6705          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
  6706          "must be different cards");
  6707   return next_card;
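       // Example (illustrative assumptions): with the usual 512-byte cards,
       // i.e. 64 heap words on a 64-bit VM, a block of sz == 10 words that
       // starts 4 words into a card ends 14 words in, and next_card is the
       // 64-word boundary of the following card.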
  6711 // CMS Bit Map Wrapper /////////////////////////////////////////
  6713 // Construct a CMS bit map infrastructure, but don't create the
  6714 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
  6715 // further below.
  6716 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
  6717   _bm(),
  6718   _shifter(shifter),
  6719   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
  6721   _bmStartWord = 0;
  6722   _bmWordSize  = 0;
  6725 bool CMSBitMap::allocate(MemRegion mr) {
  6726   _bmStartWord = mr.start();
  6727   _bmWordSize  = mr.word_size();
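         // Sizing: one map bit covers (1 << _shifter) heap words, so we need
         // (_bmWordSize >> _shifter) bits, i.e.
         // (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes, +1 byte to
         // cover rounding.  E.g., a 1G generation on a 64-bit VM has
         // _bmWordSize == 2^27, so with _shifter == 0 the map needs
         // 2^24 bytes == 16M.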
  6728   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
  6729                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  6730   if (!brs.is_reserved()) {
  6731     warning("CMS bit map allocation failure");
  6732     return false;
  6734   // For now we'll just commit all of the bit map up front.
  6735   // Later on we'll try to be more parsimonious with swap.
  6736   if (!_virtual_space.initialize(brs, brs.size())) {
  6737     warning("CMS bit map backing store failure");
  6738     return false;
  6740   assert(_virtual_space.committed_size() == brs.size(),
  6741          "didn't reserve backing store for all of CMS bit map?");
  6742   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  6743   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
  6744          _bmWordSize, "inconsistency in bit map sizing");
  6745   _bm.set_size(_bmWordSize >> _shifter);
  6747   // bm.clear(); // can we rely on getting zero'd memory? verify below
  6748   assert(isAllClear(),
  6749          "Expected zero'd memory from ReservedSpace constructor");
  6750   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
  6751          "consistency check");
  6752   return true;
  6755 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
  6756   HeapWord *next_addr, *end_addr, *last_addr;
  6757   assert_locked();
  6758   assert(covers(mr), "out-of-range error");
  6759   // XXX assert that start and end are appropriately aligned
  6760   for (next_addr = mr.start(), end_addr = mr.end();
  6761        next_addr < end_addr; next_addr = last_addr) {
  6762     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
  6763     last_addr = dirty_region.end();
  6764     if (!dirty_region.is_empty()) {
  6765       cl->do_MemRegion(dirty_region);
  6766     } else {
  6767       assert(last_addr == end_addr, "program logic");
  6768       return;
  6773 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
  6774   _bm.print_on_error(st, prefix);
  6777 #ifndef PRODUCT
  6778 void CMSBitMap::assert_locked() const {
  6779   CMSLockVerifier::assert_locked(lock());
  6782 bool CMSBitMap::covers(MemRegion mr) const {
  6783   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  6784   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
  6785          "size inconsistency");
  6786   return (mr.start() >= _bmStartWord) &&
  6787          (mr.end()   <= endWord());
  6790 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  6791   return (start >= _bmStartWord && (start + size) <= endWord());
  6794 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
  6795   // verify that there are no 1 bits in the interval [left, right)
  6796   FalseBitMapClosure falseBitMapClosure;
  6797   iterate(&falseBitMapClosure, left, right);
  6800 void CMSBitMap::region_invariant(MemRegion mr)
  6802   assert_locked();
  6803   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  6804   assert(!mr.is_empty(), "unexpected empty region");
  6805   assert(covers(mr), "mr should be covered by bit map");
  6806   // convert address range into offset range
  6807   size_t start_ofs = heapWordToOffset(mr.start());
  6808   // Make sure that end() is appropriately aligned
  6809   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
  6810                         (1 << (_shifter+LogHeapWordSize))),
  6811          "Misaligned mr.end()");
  6812   size_t end_ofs   = heapWordToOffset(mr.end());
  6813   assert(end_ofs > start_ofs, "Should mark at least one bit");
  6816 #endif
  6818 bool CMSMarkStack::allocate(size_t size) {
  6819   // allocate a stack of the requisite depth
  6820   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
  6821                    size * sizeof(oop)));
  6822   if (!rs.is_reserved()) {
  6823     warning("CMSMarkStack allocation failure");
  6824     return false;
  6826   if (!_virtual_space.initialize(rs, rs.size())) {
  6827     warning("CMSMarkStack backing store failure");
  6828     return false;
  6830   assert(_virtual_space.committed_size() == rs.size(),
  6831          "didn't reserve backing store for all of CMS stack?");
  6832   _base = (oop*)(_virtual_space.low());
  6833   _index = 0;
  6834   _capacity = size;
  6835   NOT_PRODUCT(_max_depth = 0);
  6836   return true;
  6839 // XXX FIX ME !!! In the MT case we come in here holding a
  6840 // leaf lock. For printing we need to take a further lock
  6841 // which has lower rank. We need to recalibrate the two
  6842 // lock-ranks involved in order to be able to print the
  6843 // messages below. (Or defer the printing to the caller.
  6844 // For now we take the expedient path of just disabling the
  6845 // messages for the problematic case.)
  6846 void CMSMarkStack::expand() {
  6847   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
  6848   if (_capacity == MarkStackSizeMax) {
  6849     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
  6850       // We print a warning message only once per CMS cycle.
  6851       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
  6853     return;
  6855   // Double capacity if possible
  6856   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
  6857   // Do not give up existing stack until we have managed to
  6858   // get the double capacity that we desired.
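         // (Note: on success below the stack is re-based and _index is reset
         // to 0, so expand() is evidently called only on a drained stack; on
         // failure the existing backing store, and its contents, is kept.)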
  6859   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
  6860                    new_capacity * sizeof(oop)));
  6861   if (rs.is_reserved()) {
  6862     // Release the backing store associated with old stack
  6863     _virtual_space.release();
  6864     // Reinitialize virtual space for new stack
  6865     if (!_virtual_space.initialize(rs, rs.size())) {
  6866       fatal("Not enough swap for expanded marking stack");
  6868     _base = (oop*)(_virtual_space.low());
  6869     _index = 0;
  6870     _capacity = new_capacity;
  6871   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
  6872     // Failed to double capacity, continue;
  6873     // we print a detail message only once per CMS cycle.
  6874     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
  6875             SIZE_FORMAT"K",
  6876             _capacity / K, new_capacity / K);
  6881 // Closures
  6882 // XXX: there seems to be a lot of code duplication here;
  6883 // should refactor and consolidate common code (see sketch below).
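       // A consolidated yield helper might look like the following sketch
       // (hypothetical, not part of this file); each closure's
       // do_yield_work() below repeats this unlock / yield / relock shape
       // with its own subset of locks:
       //
       //   static void yield_with(CMSCollector* c, Mutex* outer, Mutex* inner) {
       //     if (inner != NULL) inner->unlock();  // release in reverse of
       //     outer->unlock();                     // acquisition order
       //     ConcurrentMarkSweepThread::desynchronize(true);
       //     ConcurrentMarkSweepThread::acknowledge_yield_request();
       //     c->stopTimer();
       //     // ... sleep/acknowledge loop as in the bodies below ...
       //     ConcurrentMarkSweepThread::synchronize(true);
       //     outer->lock_without_safepoint_check();
       //     if (inner != NULL) inner->lock_without_safepoint_check();
       //     c->startTimer();
       //   }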
  6885 // This closure is used to mark refs into the CMS generation in
  6886 // the CMS bit map. Called at the first checkpoint. This closure
  6887 // assumes that we do not need to re-mark dirty cards; if the CMS
  6888 // generation on which this is used is not the oldest
  6889 // generation then this will lose younger_gen cards!
  6891 MarkRefsIntoClosure::MarkRefsIntoClosure(
  6892   MemRegion span, CMSBitMap* bitMap):
  6893     _span(span),
  6894     _bitMap(bitMap)
  6896     assert(_ref_processor == NULL, "deliberately left NULL");
  6897     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
  6900 void MarkRefsIntoClosure::do_oop(oop obj) {
  6901   // if p points into _span, then mark corresponding bit in _markBitMap
  6902   assert(obj->is_oop(), "expected an oop");
  6903   HeapWord* addr = (HeapWord*)obj;
  6904   if (_span.contains(addr)) {
  6905     // this should be made more efficient
  6906     _bitMap->mark(addr);
  6910 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  6911 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  6913 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
  6914   MemRegion span, CMSBitMap* bitMap):
  6915     _span(span),
  6916     _bitMap(bitMap)
  6918     assert(_ref_processor == NULL, "deliberately left NULL");
  6919     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
  6922 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
  6923   // if p points into _span, then mark corresponding bit in _markBitMap
  6924   assert(obj->is_oop(), "expected an oop");
  6925   HeapWord* addr = (HeapWord*)obj;
  6926   if (_span.contains(addr)) {
  6927     // this should be made more efficient
  6928     _bitMap->par_mark(addr);
  6932 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
  6933 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
  6935 // A variant of the above, used for CMS marking verification.
  6936 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  6937   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
  6938     _span(span),
  6939     _verification_bm(verification_bm),
  6940     _cms_bm(cms_bm)
  6942     assert(_ref_processor == NULL, "deliberately left NULL");
  6943     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
  6946 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  6947   // if p points into _span, then mark corresponding bit in _markBitMap
  6948   assert(obj->is_oop(), "expected an oop");
  6949   HeapWord* addr = (HeapWord*)obj;
  6950   if (_span.contains(addr)) {
  6951     _verification_bm->mark(addr);
  6952     if (!_cms_bm->isMarked(addr)) {
  6953       oop(addr)->print();
  6954       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
  6955       fatal("... aborting");
  6960 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  6961 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  6963 //////////////////////////////////////////////////
  6964 // MarkRefsIntoAndScanClosure
  6965 //////////////////////////////////////////////////
  6967 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
  6968                                                        ReferenceProcessor* rp,
  6969                                                        CMSBitMap* bit_map,
  6970                                                        CMSBitMap* mod_union_table,
  6971                                                        CMSMarkStack*  mark_stack,
  6972                                                        CMSCollector* collector,
  6973                                                        bool should_yield,
  6974                                                        bool concurrent_precleaning):
  6975   _collector(collector),
  6976   _span(span),
  6977   _bit_map(bit_map),
  6978   _mark_stack(mark_stack),
  6979   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
  6980                       mark_stack, concurrent_precleaning),
  6981   _yield(should_yield),
  6982   _concurrent_precleaning(concurrent_precleaning),
  6983   _freelistLock(NULL)
  6985   _ref_processor = rp;
  6986   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  6989 // This closure is used to mark refs into the CMS generation at the
  6990 // second (final) checkpoint, and to scan and transitively follow
  6991 // the unmarked oops. It is also used during the concurrent precleaning
  6992 // phase while scanning objects on dirty cards in the CMS generation.
  6993 // The marks are made in the marking bit map and the marking stack is
  6994 // used for keeping the (newly) grey objects during the scan.
  6995 // The parallel version (Par_...) appears further below.
  6996 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  6997   if (obj != NULL) {
  6998     assert(obj->is_oop(), "expected an oop");
  6999     HeapWord* addr = (HeapWord*)obj;
  7000     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
  7001     assert(_collector->overflow_list_is_empty(),
  7002            "overflow list should be empty");
  7003     if (_span.contains(addr) &&
  7004         !_bit_map->isMarked(addr)) {
  7005       // mark bit map (object is now grey)
  7006       _bit_map->mark(addr);
  7007       // push on marking stack (stack should be empty), and drain the
  7008       // stack by applying this closure to the oops in the oops popped
  7009       // from the stack (i.e. blacken the grey objects)
  7010       bool res = _mark_stack->push(obj);
  7011       assert(res, "Should have space to push on empty stack");
  7012       do {
  7013         oop new_oop = _mark_stack->pop();
  7014         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
  7015         assert(_bit_map->isMarked((HeapWord*)new_oop),
  7016                "only grey objects on this stack");
  7017         // iterate over the oops in this oop, marking and pushing
  7018         // the ones in CMS heap (i.e. in _span).
  7019         new_oop->oop_iterate(&_pushAndMarkClosure);
  7020         // check if it's time to yield
  7021         do_yield_check();
  7022       } while (!_mark_stack->isEmpty() ||
  7023                (!_concurrent_precleaning && take_from_overflow_list()));
  7024         // if marking stack is empty, and we are not doing this
  7025         // during precleaning, then check the overflow list
  7027     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
  7028     assert(_collector->overflow_list_is_empty(),
  7029            "overflow list was drained above");
  7030     // We could restore evacuated mark words, if any, used for
  7031     // overflow list links here because the overflow list is
  7032     // provably empty here. That would reduce the maximum
  7033     // size requirements for preserved_{oop,mark}_stack.
  7034     // But we'll just postpone it until we are all done
  7035     // so we can just stream through.
  7036     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
  7037       _collector->restore_preserved_marks_if_any();
  7038       assert(_collector->no_preserved_marks(), "No preserved marks");
  7040     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
  7041            "All preserved marks should have been restored above");
  7045 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  7046 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  7048 void MarkRefsIntoAndScanClosure::do_yield_work() {
  7049   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  7050          "CMS thread should hold CMS token");
  7051   assert_lock_strong(_freelistLock);
  7052   assert_lock_strong(_bit_map->lock());
  7053   // relinquish the free_list_lock and bitMaplock()
  7054   _bit_map->lock()->unlock();
  7055   _freelistLock->unlock();
  7056   ConcurrentMarkSweepThread::desynchronize(true);
  7057   ConcurrentMarkSweepThread::acknowledge_yield_request();
  7058   _collector->stopTimer();
  7059   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  7060   if (PrintCMSStatistics != 0) {
  7061     _collector->incrementYields();
  7063   _collector->icms_wait();
  7065   // See the comment in coordinator_yield()
  7066   for (unsigned i = 0;
  7067        i < CMSYieldSleepCount &&
  7068        ConcurrentMarkSweepThread::should_yield() &&
  7069        !CMSCollector::foregroundGCIsActive();
  7070        ++i) {
  7071     os::sleep(Thread::current(), 1, false);
  7072     ConcurrentMarkSweepThread::acknowledge_yield_request();
  7075   ConcurrentMarkSweepThread::synchronize(true);
  7076   _freelistLock->lock_without_safepoint_check();
  7077   _bit_map->lock()->lock_without_safepoint_check();
  7078   _collector->startTimer();
  7081 ///////////////////////////////////////////////////////////
  7082 // Par_MarkRefsIntoAndScanClosure: a parallel version of
  7083 //                                 MarkRefsIntoAndScanClosure
  7084 ///////////////////////////////////////////////////////////
  7085 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
  7086   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  7087   CMSBitMap* bit_map, OopTaskQueue* work_queue):
  7088   _span(span),
  7089   _bit_map(bit_map),
  7090   _work_queue(work_queue),
  7091   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
  7092                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
  7093   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
  7095   _ref_processor = rp;
  7096   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  7099 // This closure is used to mark refs into the CMS generation at the
  7100 // second (final) checkpoint, and to scan and transitively follow
  7101 // the unmarked oops. The marks are made in the marking bit map and
  7102 // the work_queue is used for keeping the (newly) grey objects during
  7103 // the scan phase whence they are also available for stealing by parallel
  7104 // threads. Since the marking bit map is shared, updates are
  7105 // synchronized (via CAS).
  7106 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  7107   if (obj != NULL) {
  7108     // Ignore mark word because this could be an already marked oop
  7109     // that may be chained at the end of the overflow list.
  7110     assert(obj->is_oop(true), "expected an oop");
  7111     HeapWord* addr = (HeapWord*)obj;
  7112     if (_span.contains(addr) &&
  7113         !_bit_map->isMarked(addr)) {
  7114       // mark bit map (object will become grey):
  7115       // It is possible for several threads to be
  7116       // trying to "claim" this object concurrently;
  7117       // the unique thread that succeeds in marking the
  7118       // object first will do the subsequent push on
  7119       // to the work queue (or overflow list).
  7120       if (_bit_map->par_mark(addr)) {
  7121         // push on work_queue (which may not be empty), and trim the
  7122         // queue to an appropriate length by applying this closure to
  7123         // the oops in the oops popped from the stack (i.e. blacken the
  7124         // grey objects)
  7125         bool res = _work_queue->push(obj);
  7126         assert(res, "Low water mark should be less than capacity?");
  7127         trim_queue(_low_water_mark);
  7128       } // Else, another thread claimed the object
  7133 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  7134 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  7136 // This closure is used to rescan the marked objects on the dirty cards
  7137 // in the mod union table and the card table proper.
  7138 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  7139   oop p, MemRegion mr) {
  7141   size_t size = 0;
  7142   HeapWord* addr = (HeapWord*)p;
  7143   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  7144   assert(_span.contains(addr), "we are scanning the CMS generation");
  7145   // check if it's time to yield
  7146   if (do_yield_check()) {
  7147     // We yielded for some foreground stop-world work,
  7148     // and we have been asked to abort this ongoing preclean cycle.
  7149     return 0;
  7151   if (_bitMap->isMarked(addr)) {
  7152     // it's marked; is it potentially uninitialized?
  7153     if (p->klass_or_null() != NULL) {
  7154         // an initialized object; ignore mark word in verification below
  7155         // since we are running concurrent with mutators
  7156         assert(p->is_oop(true), "should be an oop");
  7157         if (p->is_objArray()) {
  7158           // objArrays are precisely marked; restrict scanning
  7159           // to dirty cards only.
  7160           size = CompactibleFreeListSpace::adjustObjectSize(
  7161                    p->oop_iterate(_scanningClosure, mr));
  7162         } else {
  7163           // A non-array may have been imprecisely marked; we need
  7164           // to scan object in its entirety.
  7165           size = CompactibleFreeListSpace::adjustObjectSize(
  7166                    p->oop_iterate(_scanningClosure));
  7168         #ifdef ASSERT
  7169           size_t direct_size =
  7170             CompactibleFreeListSpace::adjustObjectSize(p->size());
  7171           assert(size == direct_size, "Inconsistency in size");
  7172           assert(size >= 3, "Necessary for Printezis marks to work");
  7173           if (!_bitMap->isMarked(addr+1)) {
  7174             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
  7175           } else {
  7176             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
  7177             assert(_bitMap->isMarked(addr+size-1),
  7178                    "inconsistent Printezis mark");
  7180         #endif // ASSERT
  7181     } else {
  7182       // an uninitialized object
  7183       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
  7184       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
  7185       size = pointer_delta(nextOneAddr + 1, addr);
  7186       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  7187              "alignment problem");
  7188       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
  7189       // will dirty the card when the klass pointer is installed in the
  7190       // object (signalling the completion of initialization).
  7192   } else {
  7193     // Either a not yet marked object or an uninitialized object
  7194     if (p->klass_or_null() == NULL) {
  7195       // An uninitialized object, skip to the next card, since
  7196       // we may not be able to read its P-bits yet.
  7197       assert(size == 0, "Initial value");
  7198     } else {
  7199       // An object not (yet) reached by marking: we merely need to
  7200       // compute its size so as to go look at the next block.
  7201       assert(p->is_oop(true), "should be an oop");
  7202       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
  7205   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  7206   return size;
  7209 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
  7210   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  7211          "CMS thread should hold CMS token");
  7212   assert_lock_strong(_freelistLock);
  7213   assert_lock_strong(_bitMap->lock());
  7214   // relinquish the free_list_lock and bitMaplock()
  7215   _bitMap->lock()->unlock();
  7216   _freelistLock->unlock();
  7217   ConcurrentMarkSweepThread::desynchronize(true);
  7218   ConcurrentMarkSweepThread::acknowledge_yield_request();
  7219   _collector->stopTimer();
  7220   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  7221   if (PrintCMSStatistics != 0) {
  7222     _collector->incrementYields();
  7224   _collector->icms_wait();
  7226   // See the comment in coordinator_yield()
  7227   for (unsigned i = 0; i < CMSYieldSleepCount &&
  7228                    ConcurrentMarkSweepThread::should_yield() &&
  7229                    !CMSCollector::foregroundGCIsActive(); ++i) {
  7230     os::sleep(Thread::current(), 1, false);
  7231     ConcurrentMarkSweepThread::acknowledge_yield_request();
  7234   ConcurrentMarkSweepThread::synchronize(true);
  7235   _freelistLock->lock_without_safepoint_check();
  7236   _bitMap->lock()->lock_without_safepoint_check();
  7237   _collector->startTimer();
  7241 //////////////////////////////////////////////////////////////////
  7242 // SurvivorSpacePrecleanClosure
  7243 //////////////////////////////////////////////////////////////////
  7244 // This (single-threaded) closure is used to preclean the oops in
  7245 // the survivor spaces.
  7246 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
  7248   HeapWord* addr = (HeapWord*)p;
  7249   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  7250   assert(!_span.contains(addr), "we are scanning the survivor spaces");
  7251   assert(p->klass_or_null() != NULL, "object should be initialized");
  7252   // an initialized object; ignore mark word in verification below
  7253   // since we are running concurrent with mutators
  7254   assert(p->is_oop(true), "should be an oop");
  7255   // Note that we do not yield while we iterate over
  7256   // the interior oops of p, pushing the relevant ones
  7257   // on our marking stack.
  7258   size_t size = p->oop_iterate(_scanning_closure);
  7259   do_yield_check();
  7260   // Observe that below, we do not abandon the preclean
  7261   // phase as soon as we should; rather we empty the
  7262   // marking stack before returning. This is to satisfy
  7263   // some existing assertions. In general, it may be a
  7264   // good idea to abort immediately and complete the marking
  7265   // from the grey objects at a later time.
  7266   while (!_mark_stack->isEmpty()) {
  7267     oop new_oop = _mark_stack->pop();
  7268     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
  7269     assert(_bit_map->isMarked((HeapWord*)new_oop),
  7270            "only grey objects on this stack");
  7271     // iterate over the oops in this oop, marking and pushing
  7272     // the ones in CMS heap (i.e. in _span).
  7273     new_oop->oop_iterate(_scanning_closure);
  7274     // check if it's time to yield
  7275     do_yield_check();
  7277   unsigned int after_count =
  7278     GenCollectedHeap::heap()->total_collections();
  7279   bool abort = (_before_count != after_count) ||
  7280                _collector->should_abort_preclean();
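         // A zero return tells the careful iterator to abort: either some GC
         // ran while we were scanning (total_collections() changed, so the
         // survivor spaces may have been reshuffled), or the collector has
         // asked us to cut precleaning short.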
  7281   return abort ? 0 : size;
  7284 void SurvivorSpacePrecleanClosure::do_yield_work() {
  7285   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  7286          "CMS thread should hold CMS token");
  7287   assert_lock_strong(_bit_map->lock());
  7288   // Relinquish the bit map lock
  7289   _bit_map->lock()->unlock();
  7290   ConcurrentMarkSweepThread::desynchronize(true);
  7291   ConcurrentMarkSweepThread::acknowledge_yield_request();
  7292   _collector->stopTimer();
  7293   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  7294   if (PrintCMSStatistics != 0) {
  7295     _collector->incrementYields();
  7297   _collector->icms_wait();
  7299   // See the comment in coordinator_yield()
  7300   for (unsigned i = 0; i < CMSYieldSleepCount &&
  7301                        ConcurrentMarkSweepThread::should_yield() &&
  7302                        !CMSCollector::foregroundGCIsActive(); ++i) {
  7303     os::sleep(Thread::current(), 1, false);
  7304     ConcurrentMarkSweepThread::acknowledge_yield_request();
  7307   ConcurrentMarkSweepThread::synchronize(true);
  7308   _bit_map->lock()->lock_without_safepoint_check();
  7309   _collector->startTimer();
  7312 // This closure is used to rescan the marked objects on the dirty cards
  7313 // in the mod union table and the card table proper. In the parallel
  7314 // case, although the bitMap is shared, we do a single read so the
  7315 // isMarked() query is "safe".
  7316 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  7317   // Ignore mark word because we are running concurrent with mutators
  7318   assert(p->is_oop_or_null(true), "expected an oop or null");
  7319   HeapWord* addr = (HeapWord*)p;
  7320   assert(_span.contains(addr), "we are scanning the CMS generation");
  7321   bool is_obj_array = false;
  7322   #ifdef ASSERT
  7323     if (!_parallel) {
  7324       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
  7325       assert(_collector->overflow_list_is_empty(),
  7326              "overflow list should be empty");
  7329   #endif // ASSERT
  7330   if (_bit_map->isMarked(addr)) {
  7331     // Obj arrays are precisely marked, non-arrays are not;
  7332     // so we scan objArrays precisely and non-arrays in their
  7333     // entirety.
  7334     if (p->is_objArray()) {
  7335       is_obj_array = true;
  7336       if (_parallel) {
  7337         p->oop_iterate(_par_scan_closure, mr);
  7338       } else {
  7339         p->oop_iterate(_scan_closure, mr);
  7341     } else {
  7342       if (_parallel) {
  7343         p->oop_iterate(_par_scan_closure);
  7344       } else {
  7345         p->oop_iterate(_scan_closure);
  7349   #ifdef ASSERT
  7350     if (!_parallel) {
  7351       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
  7352       assert(_collector->overflow_list_is_empty(),
  7353              "overflow list should be empty");
  7356   #endif // ASSERT
  7357   return is_obj_array;
  7360 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
  7361                         MemRegion span,
  7362                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
  7363                         bool should_yield, bool verifying):
  7364   _collector(collector),
  7365   _span(span),
  7366   _bitMap(bitMap),
  7367   _mut(&collector->_modUnionTable),
  7368   _markStack(markStack),
  7369   _yield(should_yield),
  7370   _skipBits(0)
  7372   assert(_markStack->isEmpty(), "stack should be empty");
  7373   _finger = _bitMap->startWord();
  7374   _threshold = _finger;
  7375   assert(_collector->_restart_addr == NULL, "Sanity check");
  7376   assert(_span.contains(_finger), "Out of bounds _finger?");
  7377   DEBUG_ONLY(_verifying = verifying;)
  7380 void MarkFromRootsClosure::reset(HeapWord* addr) {
  7381   assert(_markStack->isEmpty(), "would cause duplicates on stack");
  7382   assert(_span.contains(addr), "Out of bounds _finger?");
  7383   _finger = addr;
  7384   _threshold = (HeapWord*)round_to(
  7385                  (intptr_t)_finger, CardTableModRefBS::card_size);
  7388 // Should revisit to see if this should be restructured for
  7389 // greater efficiency.
  7390 bool MarkFromRootsClosure::do_bit(size_t offset) {
  7391   if (_skipBits > 0) {
  7392     _skipBits--;
  7393     return true;
  7395   // convert offset into a HeapWord*
  7396   HeapWord* addr = _bitMap->startWord() + offset;
  7397   assert(_bitMap->startWord() <= addr && addr < _bitMap->endWord(),
  7398          "address out of range");
  7399   assert(_bitMap->isMarked(addr), "tautology");
  7400   if (_bitMap->isMarked(addr+1)) {
  7401     // this is an allocated but not yet initialized object
  7402     assert(_skipBits == 0, "tautology");
  7403     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
  7404     oop p = oop(addr);
  7405     if (p->klass_or_null() == NULL) {
  7406       DEBUG_ONLY(if (!_verifying) {)
  7407         // We re-dirty the cards on which this object lies and increase
  7408         // the _threshold so that we'll come back to scan this object
  7409         // during the preclean or remark phase. (CMSCleanOnEnter)
  7410         if (CMSCleanOnEnter) {
  7411           size_t sz = _collector->block_size_using_printezis_bits(addr);
  7412           HeapWord* end_card_addr   = (HeapWord*)round_to(
  7413                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
  7414           MemRegion redirty_range = MemRegion(addr, end_card_addr);
  7415           assert(!redirty_range.is_empty(), "Arithmetical tautology");
  7416           // Bump _threshold to end_card_addr; note that
  7417           // _threshold cannot possibly exceed end_card_addr, anyhow.
  7418           // This prevents future clearing of the card as the scan proceeds
  7419           // to the right.
  7420           assert(_threshold <= end_card_addr,
  7421                  "Because we are just scanning into this object");
  7422           if (_threshold < end_card_addr) {
  7423             _threshold = end_card_addr;
  7425           if (p->klass_or_null() != NULL) {
  7426             // Redirty the range of cards...
  7427             _mut->mark_range(redirty_range);
  7428           } // ...else the setting of klass will dirty the card anyway.
  7430       DEBUG_ONLY(})
  7431       return true;
  7434   scanOopsInOop(addr);
  7435   return true;
  7438 // We take a break if we've been at this for a while,
  7439 // so as to avoid monopolizing the locks involved.
  7440 void MarkFromRootsClosure::do_yield_work() {
  7441   // First give up the locks, then yield, then re-lock
  7442   // We should probably use a constructor/destructor idiom to
  7443   // do this unlock/lock or modify the MutexUnlocker class to
  7444   // serve our purpose. XXX
  7445   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  7446          "CMS thread should hold CMS token");
  7447   assert_lock_strong(_bitMap->lock());
  7448   _bitMap->lock()->unlock();
  7449   ConcurrentMarkSweepThread::desynchronize(true);
  7450   ConcurrentMarkSweepThread::acknowledge_yield_request();
  7451   _collector->stopTimer();
  7452   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  7453   if (PrintCMSStatistics != 0) {
  7454     _collector->incrementYields();
  7456   _collector->icms_wait();
  7458   // See the comment in coordinator_yield()
  7459   for (unsigned i = 0; i < CMSYieldSleepCount &&
  7460                        ConcurrentMarkSweepThread::should_yield() &&
  7461                        !CMSCollector::foregroundGCIsActive(); ++i) {
  7462     os::sleep(Thread::current(), 1, false);
  7463     ConcurrentMarkSweepThread::acknowledge_yield_request();
  7466   ConcurrentMarkSweepThread::synchronize(true);
  7467   _bitMap->lock()->lock_without_safepoint_check();
  7468   _collector->startTimer();
  7471 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  7472   assert(_bitMap->isMarked(ptr), "expected bit to be set");
  7473   assert(_markStack->isEmpty(),
  7474          "should drain stack to limit stack usage");
  7475   // convert ptr to an oop preparatory to scanning
  7476   oop obj = oop(ptr);
  7477   // Ignore mark word in verification below, since we
  7478   // may be running concurrent with mutators.
  7479   assert(obj->is_oop(true), "should be an oop");
  7480   assert(_finger <= ptr, "_finger runneth ahead");
  7481   // advance the finger to right end of this object
  7482   _finger = ptr + obj->size();
  7483   assert(_finger > ptr, "we just incremented it above");
  7484   // On large heaps, it may take us some time to get through
  7485   // the marking phase (especially if running iCMS). During
  7486   // this time it's possible that a lot of mutations have
  7487   // accumulated in the card table and the mod union table --
  7488   // these mutation records are redundant until we have
  7489   // actually traced into the corresponding card.
  7490   // Here, we check whether advancing the finger would make
  7491   // us cross into a new card, and if so clear corresponding
  7492   // cards in the MUT (preclean them in the card-table in the
  7493   // future).
  7495   DEBUG_ONLY(if (!_verifying) {)
  7496     // The clean-on-enter optimization is disabled by default,
  7497     // until we fix 6178663.
  7498     if (CMSCleanOnEnter && (_finger > _threshold)) {
  7499       // [_threshold, _finger) represents the interval
  7500       // of cards to be cleared in MUT (or precleaned in card table).
  7501       // The set of cards to be cleared is all those that overlap
  7502       // with the interval [_threshold, _finger); note that
  7503       // _threshold is always kept card-aligned but _finger isn't
  7504       // always card-aligned.
  7505       HeapWord* old_threshold = _threshold;
  7506       assert(old_threshold == (HeapWord*)round_to(
  7507               (intptr_t)old_threshold, CardTableModRefBS::card_size),
  7508              "_threshold should always be card-aligned");
  7509       _threshold = (HeapWord*)round_to(
  7510                      (intptr_t)_finger, CardTableModRefBS::card_size);
  7511       MemRegion mr(old_threshold, _threshold);
  7512       assert(!mr.is_empty(), "Control point invariant");
  7513       assert(_span.contains(mr), "Should clear within span");
  7514       _mut->clear_range(mr);
  7516   DEBUG_ONLY(})
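         // Example (assuming 512-byte cards): if _threshold sat at card
         // boundary C0 and the finger just advanced into the third card past
         // it, the MUT bits for [C0, C3) are cleared and _threshold becomes
         // C3; any mutation records there are redundant because we are about
         // to trace through that interval directly.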
  7517   // Note: the finger doesn't advance while we drain
  7518   // the stack below.
  7519   PushOrMarkClosure pushOrMarkClosure(_collector,
  7520                                       _span, _bitMap, _markStack,
  7521                                       _finger, this);
  7522   bool res = _markStack->push(obj);
  7523   assert(res, "Empty non-zero size stack should have space for single push");
  7524   while (!_markStack->isEmpty()) {
  7525     oop new_oop = _markStack->pop();
  7526     // Skip verifying header mark word below because we are
  7527     // running concurrent with mutators.
  7528     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
  7529     // now scan this oop's oops
  7530     new_oop->oop_iterate(&pushOrMarkClosure);
  7531     do_yield_check();
  7533   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
  7536 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
  7537                        CMSCollector* collector, MemRegion span,
  7538                        CMSBitMap* bit_map,
  7539                        OopTaskQueue* work_queue,
  7540                        CMSMarkStack*  overflow_stack,
  7541                        bool should_yield):
  7542   _collector(collector),
  7543   _whole_span(collector->_span),
  7544   _span(span),
  7545   _bit_map(bit_map),
  7546   _mut(&collector->_modUnionTable),
  7547   _work_queue(work_queue),
  7548   _overflow_stack(overflow_stack),
  7549   _yield(should_yield),
  7550   _skip_bits(0),
  7551   _task(task)
  7553   assert(_work_queue->size() == 0, "work_queue should be empty");
  7554   _finger = span.start();
  7555   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
  7556   assert(_span.contains(_finger), "Out of bounds _finger?");
  7559 // Should revisit to see if this should be restructured for
  7560 // greater efficiency.
  7561 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
  7562   if (_skip_bits > 0) {
  7563     _skip_bits--;
  7564     return true;
  7566   // convert offset into a HeapWord*
  7567   HeapWord* addr = _bit_map->startWord() + offset;
  7568   assert(_bit_map->startWord() <= addr && addr < _bit_map->endWord(),
  7569          "address out of range");
  7570   assert(_bit_map->isMarked(addr), "tautology");
  7571   if (_bit_map->isMarked(addr+1)) {
  7572     // this is an allocated object that might not yet be initialized
  7573     assert(_skip_bits == 0, "tautology");
  7574     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
  7575     oop p = oop(addr);
  7576     if (p->klass_or_null() == NULL) {
  7577       // in the case of Clean-on-Enter optimization, redirty card
  7578       // and avoid clearing card by increasing  the threshold.
  7579       return true;
  7582   scan_oops_in_oop(addr);
  7583   return true;
  7586 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  7587   assert(_bit_map->isMarked(ptr), "expected bit to be set");
  7588   // Should we assert that our work queue is empty or
  7589   // below some drain limit?
  7590   assert(_work_queue->size() == 0,
  7591          "should drain stack to limit stack usage");
  7592   // convert ptr to an oop preparatory to scanning
  7593   oop obj = oop(ptr);
  7594   // Ignore mark word in verification below, since we
  7595   // may be running concurrent with mutators.
  7596   assert(obj->is_oop(true), "should be an oop");
  7597   assert(_finger <= ptr, "_finger runneth ahead");
  7598   // advance the finger to right end of this object
  7599   _finger = ptr + obj->size();
  7600   assert(_finger > ptr, "we just incremented it above");
  7601   // On large heaps, it may take us some time to get through
  7602   // the marking phase (especially if running iCMS). During
  7603   // this time it's possible that a lot of mutations have
  7604   // accumulated in the card table and the mod union table --
  7605   // these mutation records are redundant until we have
  7606   // actually traced into the corresponding card.
  7607   // Here, we check whether advancing the finger would make
  7608   // us cross into a new card, and if so clear corresponding
  7609   // cards in the MUT (preclean them in the card-table in the
  7610   // future).
  7612   // The clean-on-enter optimization is disabled by default,
  7613   // until we fix 6178663.
  7614   if (CMSCleanOnEnter && (_finger > _threshold)) {
  7615     // [_threshold, _finger) represents the interval
  7616     // of cards to be cleared  in MUT (or precleaned in card table).
  7617     // The set of cards to be cleared is all those that overlap
  7618     // with the interval [_threshold, _finger); note that
  7619     // _threshold is always kept card-aligned but _finger isn't
  7620     // always card-aligned.
  7621     HeapWord* old_threshold = _threshold;
  7622     assert(old_threshold == (HeapWord*)round_to(
  7623             (intptr_t)old_threshold, CardTableModRefBS::card_size),
  7624            "_threshold should always be card-aligned");
  7625     _threshold = (HeapWord*)round_to(
  7626                    (intptr_t)_finger, CardTableModRefBS::card_size);
  7627     MemRegion mr(old_threshold, _threshold);
  7628     assert(!mr.is_empty(), "Control point invariant");
  7629     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
  7630     _mut->clear_range(mr);
  7631   }
  7633   // Note: the local finger doesn't advance while we drain
  7634   // the stack below, but the global finger sure can and will.
  7635   HeapWord** gfa = _task->global_finger_addr();
  7636   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
  7637                                       _span, _bit_map,
  7638                                       _work_queue,
  7639                                       _overflow_stack,
  7640                                       _finger,
  7641                                       gfa, this);
  7642   bool res = _work_queue->push(obj);   // overflow could occur here
  7643   assert(res, "Will hold once we use workqueues");
  7644   while (true) {
  7645     oop new_oop;
  7646     if (!_work_queue->pop_local(new_oop)) {
  7647       // We emptied our work_queue; check if there's stuff that can
  7648       // be gotten from the overflow stack.
  7649       if (CMSConcMarkingTask::get_work_from_overflow_stack(
  7650             _overflow_stack, _work_queue)) {
  7651         do_yield_check();
  7652         continue;
  7653       } else {  // done
  7654         break;
  7655       }
  7656     }
  7657     // Skip verifying header mark word below because we are
  7658     // running concurrent with mutators.
  7659     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
  7660     // now scan this oop's oops
  7661     new_oop->oop_iterate(&pushOrMarkClosure);
  7662     do_yield_check();
  7663   }
  7664   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
  7665 }
  7667 // Yield in response to a request from VM Thread or
  7668 // from mutators.
  7669 void Par_MarkFromRootsClosure::do_yield_work() {
  7670   assert(_task != NULL, "sanity");
  7671   _task->yield();
  7672 }
  7674 // A variant of the above used for verifying CMS marking work.
  7675 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
  7676                         MemRegion span,
  7677                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  7678                         CMSMarkStack*  mark_stack):
  7679   _collector(collector),
  7680   _span(span),
  7681   _verification_bm(verification_bm),
  7682   _cms_bm(cms_bm),
  7683   _mark_stack(mark_stack),
  7684   _pam_verify_closure(collector, span, verification_bm, cms_bm,
  7685                       mark_stack)
  7686 {
  7687   assert(_mark_stack->isEmpty(), "stack should be empty");
  7688   _finger = _verification_bm->startWord();
  7689   assert(_collector->_restart_addr == NULL, "Sanity check");
  7690   assert(_span.contains(_finger), "Out of bounds _finger?");
  7691 }
  7693 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  7694   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  7695   assert(_span.contains(addr), "Out of bounds _finger?");
  7696   _finger = addr;
  7697 }
  7699 // Should revisit to see if this should be restructured for
  7700 // greater efficiency.
  7701 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  7702   // convert offset into a HeapWord*
  7703   HeapWord* addr = _verification_bm->startWord() + offset;
  7704   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
  7705          "address out of range");
  7706   assert(_verification_bm->isMarked(addr), "tautology");
  7707   assert(_cms_bm->isMarked(addr), "tautology");
  7709   assert(_mark_stack->isEmpty(),
  7710          "should drain stack to limit stack usage");
  7711   // convert addr to an oop preparatory to scanning
  7712   oop obj = oop(addr);
  7713   assert(obj->is_oop(), "should be an oop");
  7714   assert(_finger <= addr, "_finger runneth ahead");
  7715   // advance the finger to right end of this object
  7716   _finger = addr + obj->size();
  7717   assert(_finger > addr, "we just incremented it above");
  7718   // Note: the finger doesn't advance while we drain
  7719   // the stack below.
  7720   bool res = _mark_stack->push(obj);
  7721   assert(res, "Empty non-zero size stack should have space for single push");
  7722   while (!_mark_stack->isEmpty()) {
  7723     oop new_oop = _mark_stack->pop();
  7724     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
  7725     // now scan this oop's oops
  7726     new_oop->oop_iterate(&_pam_verify_closure);
  7727   }
  7728   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
  7729   return true;
  7730 }
  7732 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
  7733   CMSCollector* collector, MemRegion span,
  7734   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  7735   CMSMarkStack*  mark_stack):
  7736   CMSOopClosure(collector->ref_processor()),
  7737   _collector(collector),
  7738   _span(span),
  7739   _verification_bm(verification_bm),
  7740   _cms_bm(cms_bm),
  7741   _mark_stack(mark_stack)
  7742 { }
  7744 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
  7745 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
  7747 // Upon stack overflow, we discard (part of) the stack,
  7748 // remembering the least address amongst those discarded
  7749 // in CMSCollector's _restart_address.
  7750 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  7751   // Remember the least grey address discarded
  7752   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  7753   _collector->lower_restart_addr(ra);
  7754   _mark_stack->reset();  // discard stack contents
  7755   _mark_stack->expand(); // expand the stack if possible
  7756 }
  7758 void PushAndMarkVerifyClosure::do_oop(oop obj) {
  7759   assert(obj->is_oop_or_null(), "expected an oop or NULL");
  7760   HeapWord* addr = (HeapWord*)obj;
  7761   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
  7762     // Oop lies in _span and isn't yet grey or black
  7763     _verification_bm->mark(addr);            // now grey
  7764     if (!_cms_bm->isMarked(addr)) {
  7765       oop(addr)->print();
  7766       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
  7767                              addr);
  7768       fatal("... aborting");
  7769     }
  7771     if (!_mark_stack->push(obj)) { // stack overflow
  7772       if (PrintCMSStatistics != 0) {
  7773         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  7774                                SIZE_FORMAT, _mark_stack->capacity());
  7775       }
  7776       assert(_mark_stack->isFull(), "Else push should have succeeded");
  7777       handle_stack_overflow(addr);
  7778     }
  7779     // anything including and to the right of _finger
  7780     // will be scanned as we iterate over the remainder of the
  7781     // bit map
  7782   }
  7783 }
  7785 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
  7786                      MemRegion span,
  7787                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
  7788                      HeapWord* finger, MarkFromRootsClosure* parent) :
  7789   CMSOopClosure(collector->ref_processor()),
  7790   _collector(collector),
  7791   _span(span),
  7792   _bitMap(bitMap),
  7793   _markStack(markStack),
  7794   _finger(finger),
  7795   _parent(parent)
  7796 { }
  7798 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
  7799                      MemRegion span,
  7800                      CMSBitMap* bit_map,
  7801                      OopTaskQueue* work_queue,
  7802                      CMSMarkStack*  overflow_stack,
  7803                      HeapWord* finger,
  7804                      HeapWord** global_finger_addr,
  7805                      Par_MarkFromRootsClosure* parent) :
  7806   CMSOopClosure(collector->ref_processor()),
  7807   _collector(collector),
  7808   _whole_span(collector->_span),
  7809   _span(span),
  7810   _bit_map(bit_map),
  7811   _work_queue(work_queue),
  7812   _overflow_stack(overflow_stack),
  7813   _finger(finger),
  7814   _global_finger_addr(global_finger_addr),
  7815   _parent(parent)
  7816 { }
  7818 // Assumes thread-safe access by callers, who are
  7819 // responsible for mutual exclusion.
  7820 void CMSCollector::lower_restart_addr(HeapWord* low) {
  7821   assert(_span.contains(low), "Out of bounds addr");
  7822   if (_restart_addr == NULL) {
  7823     _restart_addr = low;
  7824   } else {
  7825     _restart_addr = MIN2(_restart_addr, low);
  7826   }
  7827 }
  7829 // Upon stack overflow, we discard (part of) the stack,
  7830 // remembering the least address amongst those discarded
  7831 // in CMSCollector's _restart_address.
  7832 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  7833   // Remember the least grey address discarded
  7834   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
  7835   _collector->lower_restart_addr(ra);
  7836   _markStack->reset();  // discard stack contents
  7837   _markStack->expand(); // expand the stack if possible
  7838 }
  7840 // Upon stack overflow, we discard (part of) the stack,
  7841 // remembering the least address amongst those discarded
  7842 // in CMSCollector's _restart_address.
  7843 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  7844   // We need to do this under a mutex to prevent other
  7845   // workers from interfering with the work done below.
  7846   MutexLockerEx ml(_overflow_stack->par_lock(),
  7847                    Mutex::_no_safepoint_check_flag);
  7848   // Remember the least grey address discarded
  7849   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  7850   _collector->lower_restart_addr(ra);
  7851   _overflow_stack->reset();  // discard stack contents
  7852   _overflow_stack->expand(); // expand the stack if possible
  7853 }
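// [Editorial sketch; not part of the original file.] The three
// handle_stack_overflow() variants above all follow one recovery
// protocol, which in outline (hypothetical names, real operations
// as shown above) is:
//
//   void on_overflow(CMSCollector* collector, Stack* s, HeapWord* lost) {
//     HeapWord* ra = (HeapWord*)s->least_value(lost); // least grey addr lost
//     collector->lower_restart_addr(ra);  // MIN2 across all overflows
//     s->reset();                         // discard the stack's contents
//     s->expand();                        // grow it for the next attempt
//   }
//
// Marking later restarts from _restart_addr, so every grey object at or
// above the least discarded address is guaranteed to be revisited.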
  7855 void CMKlassClosure::do_klass(Klass* k) {
  7856   assert(_oop_closure != NULL, "Not initialized?");
  7857   k->oops_do(_oop_closure);
  7858 }
  7860 void PushOrMarkClosure::do_oop(oop obj) {
  7861   // Ignore mark word because we are running concurrent with mutators.
  7862   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  7863   HeapWord* addr = (HeapWord*)obj;
  7864   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
  7865     // Oop lies in _span and isn't yet grey or black
  7866     _bitMap->mark(addr);            // now grey
  7867     if (addr < _finger) {
  7868       // the bit map iteration has already either passed, or
  7869       // sampled, this bit in the bit map; we'll need to
  7870       // use the marking stack to scan this oop's oops.
  7871       bool simulate_overflow = false;
  7872       NOT_PRODUCT(
  7873         if (CMSMarkStackOverflowALot &&
  7874             _collector->simulate_overflow()) {
  7875           // simulate a stack overflow
  7876           simulate_overflow = true;
  7877         }
  7878       )
  7879       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
  7880         if (PrintCMSStatistics != 0) {
  7881           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  7882                                  SIZE_FORMAT, _markStack->capacity());
  7883         }
  7884         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
  7885         handle_stack_overflow(addr);
  7886       }
  7887     }
  7888     // anything including and to the right of _finger
  7889     // will be scanned as we iterate over the remainder of the
  7890     // bit map
  7891     do_yield_check();
  7892   }
  7893 }
  7895 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  7896 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  7898 void Par_PushOrMarkClosure::do_oop(oop obj) {
  7899   // Ignore mark word because we are running concurrent with mutators.
  7900   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  7901   HeapWord* addr = (HeapWord*)obj;
  7902   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
  7903     // Oop lies in _span and isn't yet grey or black
  7904     // We read the global_finger (volatile read) strictly after marking oop
  7905     bool res = _bit_map->par_mark(addr);    // now grey
  7906     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
  7907     // Should we push this marked oop on our stack?
  7908     // -- if someone else marked it, nothing to do
  7909     // -- if target oop is above global finger nothing to do
  7910     // -- if target oop is in chunk and above local finger
  7911     //      then nothing to do
  7912     // -- else push on work queue
  7913     if (   !res       // someone else marked it, they will deal with it
  7914         || (addr >= *gfa)  // will be scanned in a later task
  7915         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
  7916       return;
  7917     }
  7918     // the bit map iteration has already either passed, or
  7919     // sampled, this bit in the bit map; we'll need to
  7920     // use the marking stack to scan this oop's oops.
  7921     bool simulate_overflow = false;
  7922     NOT_PRODUCT(
  7923       if (CMSMarkStackOverflowALot &&
  7924           _collector->simulate_overflow()) {
  7925         // simulate a stack overflow
  7926         simulate_overflow = true;
  7927       }
  7928     )
  7929     if (simulate_overflow ||
  7930         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
  7931       // stack overflow
  7932       if (PrintCMSStatistics != 0) {
  7933         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  7934                                SIZE_FORMAT, _overflow_stack->capacity());
  7935       }
  7936       // We cannot assert that the overflow stack is full because
  7937       // it may have been emptied since.
  7938       assert(simulate_overflow ||
  7939              _work_queue->size() == _work_queue->max_elems(),
  7940             "Else push should have succeeded");
  7941       handle_stack_overflow(addr);
  7942     }
  7943     do_yield_check();
  7944   }
  7945 }
  7947 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  7948 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  7950 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
  7951                                        MemRegion span,
  7952                                        ReferenceProcessor* rp,
  7953                                        CMSBitMap* bit_map,
  7954                                        CMSBitMap* mod_union_table,
  7955                                        CMSMarkStack*  mark_stack,
  7956                                        bool           concurrent_precleaning):
  7957   CMSOopClosure(rp),
  7958   _collector(collector),
  7959   _span(span),
  7960   _bit_map(bit_map),
  7961   _mod_union_table(mod_union_table),
  7962   _mark_stack(mark_stack),
  7963   _concurrent_precleaning(concurrent_precleaning)
  7964 {
  7965   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  7966 }
  7968 // Grey object rescan during pre-cleaning and second checkpoint phases --
  7969 // the non-parallel version (the parallel version appears further below).
  7970 void PushAndMarkClosure::do_oop(oop obj) {
  7971   // Ignore mark word verification. If during concurrent precleaning,
  7972   // the object monitor may be locked. If during the checkpoint
  7973   // phases, the object may already have been reached by a different
  7974   // path and may be at the end of the global overflow list (so
  7975   // the mark word may be NULL).
  7976   assert(obj->is_oop_or_null(true /* ignore mark word */),
  7977          "expected an oop or NULL");
  7978   HeapWord* addr = (HeapWord*)obj;
  7979   // Check if oop points into the CMS generation
  7980   // and is not marked
  7981   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
  7982     // a white object ...
  7983     _bit_map->mark(addr);         // ... now grey
  7984     // push on the marking stack (grey set)
  7985     bool simulate_overflow = false;
  7986     NOT_PRODUCT(
  7987       if (CMSMarkStackOverflowALot &&
  7988           _collector->simulate_overflow()) {
  7989         // simulate a stack overflow
  7990         simulate_overflow = true;
  7991       }
  7992     )
  7993     if (simulate_overflow || !_mark_stack->push(obj)) {
  7994       if (_concurrent_precleaning) {
  7995          // During precleaning we can just dirty the appropriate card(s)
  7996          // in the mod union table, thus ensuring that the object remains
  7997          // in the grey set, and continue. In the case of object arrays
  7998          // we need to dirty all of the cards that the object spans,
  7999          // since the rescan of object arrays will be limited to the
  8000          // dirty cards.
  8001          // Note that no one can be interfering with us in this action
  8002          // of dirtying the mod union table, so no locking or atomics
  8003          // are required.
  8004          if (obj->is_objArray()) {
  8005            size_t sz = obj->size();
  8006            HeapWord* end_card_addr = (HeapWord*)round_to(
  8007                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
  8008            MemRegion redirty_range = MemRegion(addr, end_card_addr);
  8009            assert(!redirty_range.is_empty(), "Arithmetical tautology");
  8010            _mod_union_table->mark_range(redirty_range);
  8011          } else {
  8012            _mod_union_table->mark(addr);
  8013          }
  8014          _collector->_ser_pmc_preclean_ovflw++;
  8015       } else {
  8016          // During the remark phase, we need to remember this oop
  8017          // in the overflow list.
  8018          _collector->push_on_overflow_list(obj);
  8019          _collector->_ser_pmc_remark_ovflw++;
  8020       }
  8021     }
  8022   }
  8023 }
  8025 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
  8026                                                MemRegion span,
  8027                                                ReferenceProcessor* rp,
  8028                                                CMSBitMap* bit_map,
  8029                                                OopTaskQueue* work_queue):
  8030   CMSOopClosure(rp),
  8031   _collector(collector),
  8032   _span(span),
  8033   _bit_map(bit_map),
  8034   _work_queue(work_queue)
  8035 {
  8036   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  8037 }
  8039 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  8040 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  8042 // Grey object rescan during second checkpoint phase --
  8043 // the parallel version.
  8044 void Par_PushAndMarkClosure::do_oop(oop obj) {
  8045   // In the assert below, we ignore the mark word because
  8046   // this oop may point to an already visited object that is
  8047   // on the overflow stack (in which case the mark word has
  8048   // been hijacked for chaining into the overflow stack --
  8049   // if this is the last object in the overflow stack then
  8050   // its mark word will be NULL). Because this object may
  8051   // have been subsequently popped off the global overflow
  8052   // stack, and the mark word possibly restored to the prototypical
  8053   // value, by the time we get to examine this failing assert in
  8054   // the debugger, is_oop_or_null(false) may subsequently start
  8055   // to hold.
  8056   assert(obj->is_oop_or_null(true),
  8057          "expected an oop or NULL");
  8058   HeapWord* addr = (HeapWord*)obj;
  8059   // Check if oop points into the CMS generation
  8060   // and is not marked
  8061   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
  8062     // a white object ...
  8063     // If we manage to "claim" the object, by being the
  8064     // first thread to mark it, then we push it on our
  8065     // marking stack
  8066     if (_bit_map->par_mark(addr)) {     // ... now grey
  8067       // push on work queue (grey set)
  8068       bool simulate_overflow = false;
  8069       NOT_PRODUCT(
  8070         if (CMSMarkStackOverflowALot &&
  8071             _collector->par_simulate_overflow()) {
  8072           // simulate a stack overflow
  8073           simulate_overflow = true;
  8074         }
  8075       )
  8076       if (simulate_overflow || !_work_queue->push(obj)) {
  8077         _collector->par_push_on_overflow_list(obj);
  8078         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
  8079       }
  8080     } // Else, some other thread got there first
  8081   }
  8082 }
  8084 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  8085 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  8087 void CMSPrecleanRefsYieldClosure::do_yield_work() {
  8088   Mutex* bml = _collector->bitMapLock();
  8089   assert_lock_strong(bml);
  8090   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  8091          "CMS thread should hold CMS token");
  8093   bml->unlock();
  8094   ConcurrentMarkSweepThread::desynchronize(true);
  8096   ConcurrentMarkSweepThread::acknowledge_yield_request();
  8098   _collector->stopTimer();
  8099   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  8100   if (PrintCMSStatistics != 0) {
  8101     _collector->incrementYields();
  8102   }
  8103   _collector->icms_wait();
  8105   // See the comment in coordinator_yield()
  8106   for (unsigned i = 0; i < CMSYieldSleepCount &&
  8107                        ConcurrentMarkSweepThread::should_yield() &&
  8108                        !CMSCollector::foregroundGCIsActive(); ++i) {
  8109     os::sleep(Thread::current(), 1, false);
  8110     ConcurrentMarkSweepThread::acknowledge_yield_request();
  8111   }
  8113   ConcurrentMarkSweepThread::synchronize(true);
  8114   bml->lock();
  8116   _collector->startTimer();
  8117 }
  8119 bool CMSPrecleanRefsYieldClosure::should_return() {
  8120   if (ConcurrentMarkSweepThread::should_yield()) {
  8121     do_yield_work();
  8122   }
  8123   return _collector->foregroundGCIsActive();
  8124 }
  8126 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
  8127   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
  8128          "mr should be aligned to start at a card boundary");
  8129   // We'd like to assert:
  8130   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
  8131   //        "mr should be a range of cards");
  8132   // However, that would be too strong in one case -- the last
  8133   // partition ends at _unallocated_block which, in general, can be
  8134   // an arbitrary boundary, not necessarily card aligned.
  8135   if (PrintCMSStatistics != 0) {
  8136     _num_dirty_cards +=
  8137          mr.word_size()/CardTableModRefBS::card_size_in_words;
  8138   }
  8139   _space->object_iterate_mem(mr, &_scan_cl);
  8140 }
  8142 SweepClosure::SweepClosure(CMSCollector* collector,
  8143                            ConcurrentMarkSweepGeneration* g,
  8144                            CMSBitMap* bitMap, bool should_yield) :
  8145   _collector(collector),
  8146   _g(g),
  8147   _sp(g->cmsSpace()),
  8148   _limit(_sp->sweep_limit()),
  8149   _freelistLock(_sp->freelistLock()),
  8150   _bitMap(bitMap),
  8151   _yield(should_yield),
  8152   _inFreeRange(false),           // No free range at beginning of sweep
  8153   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
  8154   _lastFreeRangeCoalesced(false),
  8155   _freeFinger(g->used_region().start())
  8156 {
  8157   NOT_PRODUCT(
  8158     _numObjectsFreed = 0;
  8159     _numWordsFreed   = 0;
  8160     _numObjectsLive = 0;
  8161     _numWordsLive = 0;
  8162     _numObjectsAlreadyFree = 0;
  8163     _numWordsAlreadyFree = 0;
  8164     _last_fc = NULL;
  8166     _sp->initializeIndexedFreeListArrayReturnedBytes();
  8167     _sp->dictionary()->initialize_dict_returned_bytes();
  8168   )
  8169   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
  8170          "sweep _limit out of bounds");
  8171   if (CMSTraceSweeper) {
  8172     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
  8173                         _limit);
  8174   }
  8175 }
  8177 void SweepClosure::print_on(outputStream* st) const {
  8178   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
  8179                 _sp->bottom(), _sp->end());
  8180   tty->print_cr("_limit = " PTR_FORMAT, _limit);
  8181   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
  8182   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
  8183   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
  8184                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
  8185 }
  8187 #ifndef PRODUCT
  8188 // Assertion checking only:  no useful work in product mode --
  8189 // however, if any of the flags below become product flags,
  8190 // you may need to review this code to see if it needs to be
  8191 // enabled in product mode.
  8192 SweepClosure::~SweepClosure() {
  8193   assert_lock_strong(_freelistLock);
  8194   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
  8195          "sweep _limit out of bounds");
  8196   if (inFreeRange()) {
  8197     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
  8198     print();
  8199     ShouldNotReachHere();
  8200   }
  8201   if (Verbose && PrintGC) {
  8202     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
  8203                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
  8204     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
  8205                            SIZE_FORMAT" bytes  "
  8206       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
  8207       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
  8208       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
  8209     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
  8210                         * sizeof(HeapWord);
  8211     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
  8213     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
  8214       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
  8215       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
  8216       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
  8217       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
  8218       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
  8219         indexListReturnedBytes);
  8220       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
  8221         dict_returned_bytes);
  8222     }
  8223   }
  8224   if (CMSTraceSweeper) {
  8225     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
  8226                            _limit);
  8227   }
  8228 }
  8229 #endif  // PRODUCT
  8231 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
  8232     bool freeRangeInFreeLists) {
  8233   if (CMSTraceSweeper) {
  8234     gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
  8235                freeFinger, freeRangeInFreeLists);
  8236   }
  8237   assert(!inFreeRange(), "Trampling existing free range");
  8238   set_inFreeRange(true);
  8239   set_lastFreeRangeCoalesced(false);
  8241   set_freeFinger(freeFinger);
  8242   set_freeRangeInFreeLists(freeRangeInFreeLists);
  8243   if (CMSTestInFreeList) {
  8244     if (freeRangeInFreeLists) {
  8245       FreeChunk* fc = (FreeChunk*) freeFinger;
  8246       assert(fc->is_free(), "A chunk on the free list should be free.");
  8247       assert(fc->size() > 0, "Free range should have a size");
  8248       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
  8249     }
  8250   }
  8251 }
  8253 // Note that the sweeper runs concurrently with mutators. Thus,
  8254 // it is possible for direct allocation in this generation to happen
  8255 // in the middle of the sweep. Note that the sweeper also coalesces
  8256 // contiguous free blocks. Thus, unless the sweeper and the allocator
  8257 // synchronize appropriately, freshly allocated blocks may get swept up.
  8258 // This is accomplished by the sweeper locking the free lists while
  8259 // it is sweeping. Thus blocks that are determined to be free are
  8260 // indeed free. There is however one additional complication:
  8261 // blocks that have been allocated since the final checkpoint and
  8262 // mark, will not have been marked and so would be treated as
  8263 // unreachable and swept up. To prevent this, the allocator marks
  8264 // the bit map when allocating during the sweep phase. This leads,
  8265 // however, to a further complication -- objects may have been allocated
  8266 // but not yet initialized -- in the sense that the header isn't yet
  8267 // installed. The sweeper cannot then determine the size of the block
  8268 // in order to skip over it. To deal with this case, we use a technique
  8269 // (due to Printezis) to encode such uninitialized block sizes in the
  8270 // bit map. Since the bit map uses a bit per every HeapWord, but the
  8271 // CMS generation has a minimum object size of 3 HeapWords, it follows
  8272 // that "normal marks" won't be adjacent in the bit map (there will
  8273 // always be at least two 0 bits between successive 1 bits). We make use
  8274 // of these "unused" bits to represent uninitialized blocks -- the bit
  8275 // corresponding to the start of the uninitialized object and the next
  8276 // bit are both set. Finally, a 1 bit marks the end of the object that
  8277 // started with the two consecutive 1 bits to indicate its potentially
  8278 // uninitialized state.
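// A worked instance of the encoding just described (an editorial sketch,
// not part of the original file): suppose an uninitialized block of
// 5 HeapWords starts at bit k.
//
//   bit:    k   k+1  k+2  k+3  k+4
//   value:  1    1    0    0    1
//
// Bits k and k+1 ("two consecutive 1s") flag the block as uninitialized;
// the next 1 bit, at k+4, marks its last word. The size can then be read
// off the bit map without touching the missing header, mirroring the
// lookup in do_live_chunk() below:
//
//   if (bm->isMarked(addr) && bm->isMarked(addr + 1)) {
//     HeapWord* last = bm->getNextMarkedWordAddress(addr + 2);
//     size_t size = pointer_delta(last + 1, addr);  // 5 words here
//   }
//
// The minimum object size of 3 HeapWords is what keeps the "two adjacent
// 1s" pattern unambiguous: normal marks are never adjacent.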
  8280 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
  8281   FreeChunk* fc = (FreeChunk*)addr;
  8282   size_t res;
  8284   // Check if we are done sweeping. Below we check "addr >= _limit" rather
  8285   // than "addr == _limit" because although _limit was a block boundary when
  8286   // we started the sweep, it may no longer be one because heap expansion
  8287   // may have caused us to coalesce the block ending at the address _limit
  8288   // with a newly expanded chunk (this happens when _limit was set to the
  8289   // previous _end of the space), so we may have stepped past _limit:
  8290   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
  8291   if (addr >= _limit) { // we have swept up to or past the limit: finish up
  8292     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
  8293            "sweep _limit out of bounds");
  8294     assert(addr < _sp->end(), "addr out of bounds");
  8295     // Flush any free range we might be holding as a single
  8296     // coalesced chunk to the appropriate free list.
  8297     if (inFreeRange()) {
  8298       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
  8299              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
  8300       flush_cur_free_chunk(freeFinger(),
  8301                            pointer_delta(addr, freeFinger()));
  8302       if (CMSTraceSweeper) {
  8303         gclog_or_tty->print("Sweep: last chunk: ");
  8304         gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
  8305                    "[coalesced:"SIZE_FORMAT"]\n",
  8306                    freeFinger(), pointer_delta(addr, freeFinger()),
  8307                    lastFreeRangeCoalesced());
  8308       }
  8309     }
  8311     // help the iterator loop finish
  8312     return pointer_delta(_sp->end(), addr);
  8313   }
  8315   assert(addr < _limit, "sweep invariant");
  8316   // check if we should yield
  8317   do_yield_check(addr);
  8318   if (fc->is_free()) {
  8319     // Chunk that is already free
  8320     res = fc->size();
  8321     do_already_free_chunk(fc);
  8322     debug_only(_sp->verifyFreeLists());
  8323     // If we flush the chunk at hand in lookahead_and_flush()
  8324     // and it's coalesced with a preceding chunk, then the
  8325     // process of "mangling" the payload of the coalesced block
  8326     // will cause erasure of the size information from the
  8327     // (erstwhile) header of all the coalesced blocks but the
  8328     // first, so the first disjunct in the assert will not hold
  8329     // in that specific case (in which case the second disjunct
  8330     // will hold).
  8331     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
  8332            "Otherwise the size info doesn't change at this step");
  8333     NOT_PRODUCT(
  8334       _numObjectsAlreadyFree++;
  8335       _numWordsAlreadyFree += res;
  8336     )
  8337     NOT_PRODUCT(_last_fc = fc;)
  8338   } else if (!_bitMap->isMarked(addr)) {
  8339     // Chunk is fresh garbage
  8340     res = do_garbage_chunk(fc);
  8341     debug_only(_sp->verifyFreeLists());
  8342     NOT_PRODUCT(
  8343       _numObjectsFreed++;
  8344       _numWordsFreed += res;
  8345     )
  8346   } else {
  8347     // Chunk that is alive.
  8348     res = do_live_chunk(fc);
  8349     debug_only(_sp->verifyFreeLists());
  8350     NOT_PRODUCT(
  8351         _numObjectsLive++;
  8352         _numWordsLive += res;
  8353     )
  8354   }
  8355   return res;
  8356 }
  8358 // For the smart allocation, record following
  8359 //  split deaths - a free chunk is removed from its free list because
  8360 //      it is being split into two or more chunks.
  8361 //  split birth - a free chunk is being added to its free list because
  8362 //      a larger free chunk has been split and resulted in this free chunk.
  8363 //  coal death - a free chunk is being removed from its free list because
  8364 //      it is being coalesced into a large free chunk.
  8365 //  coal birth - a free chunk is being added to its free list because
  8366 //      it was created when two or more free chunks were coalesced into
  8367 //      this free chunk.
  8368 //
  8369 // These statistics are used to determine the desired number of free
  8370 // chunks of a given size.  The desired number is chosen to be relative
  8371 // to the end of a CMS sweep.  The desired number at the end of a sweep
  8372 // is the
  8373 //      count-at-end-of-previous-sweep (an amount that was enough)
  8374 //              - count-at-beginning-of-current-sweep  (the excess)
  8375 //              + split-births  (gains in this size during interval)
  8376 //              - split-deaths  (demands on this size during interval)
  8377 // where the interval is from the end of one sweep to the end of the
  8378 // next.
  8379 //
  8380 // When sweeping the sweeper maintains an accumulated chunk which is
  8381 // the chunk that is made up of chunks that have been coalesced.  That
  8382 // will be termed the left-hand chunk.  A new chunk of garbage that
  8383 // is being considered for coalescing will be referred to as the
  8384 // right-hand chunk.
  8385 //
  8386 // When making a decision on whether to coalesce a right-hand chunk with
  8387 // the current left-hand chunk, the current count vs. the desired count
  8388 // of the left-hand chunk is considered.  Also if the right-hand chunk
  8389 // is near the large chunk at the end of the heap (see
  8390 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
  8391 // left-hand chunk is coalesced.
  8392 //
  8393 // When making a decision about whether to split a chunk, the desired count
  8394 // vs. the current count of the candidate to be split is also considered.
  8395 // If the candidate is underpopulated (currently fewer chunks than desired)
  8396 // a chunk of an overpopulated (currently more chunks than desired) size may
  8397 // be chosen.  The "hint" associated with a free list, if non-null, points
  8398 // to a free list which may be overpopulated.
  8399 //
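// A small worked instance of the formula above (illustrative numbers
// only, not from the original file): if the previous sweep ended with
// 40 chunks of some size (an amount that sufficed), this sweep began
// with 25 still on the list, and the interval saw 10 split births and
// 5 split deaths, then
//
//   desired = 40 - 25 + 10 - 5 = 20
//
// i.e. the list is sized to the net demand observed over the interval.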
  8401 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
  8402   const size_t size = fc->size();
  8403   // Chunks that cannot be coalesced are not in the
  8404   // free lists.
  8405   if (CMSTestInFreeList && !fc->cantCoalesce()) {
  8406     assert(_sp->verify_chunk_in_free_list(fc),
  8407       "free chunk should be in free lists");
  8408   }
  8409   // a chunk that is already free, should not have been
  8410   // marked in the bit map
  8411   HeapWord* const addr = (HeapWord*) fc;
  8412   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  8413   // Verify that the bit map has no bits marked between
  8414   // addr and purported end of this block.
  8415   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  8417   // Some chunks cannot be coalesced under any circumstances.
  8418   // See the definition of cantCoalesce().
  8419   if (!fc->cantCoalesce()) {
  8420     // This chunk can potentially be coalesced.
  8421     if (_sp->adaptive_freelists()) {
  8422       // All the work is done in
  8423       do_post_free_or_garbage_chunk(fc, size);
  8424     } else {  // Not adaptive free lists
  8425       // this is a free chunk that can potentially be coalesced by the sweeper;
  8426       if (!inFreeRange()) {
  8427         // if the next chunk is a free block that can't be coalesced
  8428         // it doesn't make sense to remove this chunk from the free lists
  8429         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
  8430         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
  8431         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
  8432             nextChunk->is_free()               &&     // ... which is free...
  8433             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
  8434           // nothing to do
  8435         } else {
  8436           // Potentially the start of a new free range:
  8437           // Don't eagerly remove it from the free lists.
  8438           // No need to remove it if it will just be put
  8439           // back again.  (Also from a pragmatic point of view
  8440           // if it is a free block in a region that is beyond
  8441           // any allocated blocks, an assertion will fail)
  8442           // Remember the start of a free run.
  8443           initialize_free_range(addr, true);
  8444           // end - can coalesce with next chunk
  8445         }
  8446       } else {
  8447         // the midst of a free range, we are coalescing
  8448         print_free_block_coalesced(fc);
  8449         if (CMSTraceSweeper) {
  8450           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
  8451         }
  8452         // remove it from the free lists
  8453         _sp->removeFreeChunkFromFreeLists(fc);
  8454         set_lastFreeRangeCoalesced(true);
  8455         // If the chunk is being coalesced and the current free range is
  8456         // in the free lists, remove the current free range so that it
  8457         // will be returned to the free lists in its entirety - all
  8458         // the coalesced pieces included.
  8459         if (freeRangeInFreeLists()) {
  8460           FreeChunk* ffc = (FreeChunk*) freeFinger();
  8461           assert(ffc->size() == pointer_delta(addr, freeFinger()),
  8462             "Size of free range is inconsistent with chunk size.");
  8463           if (CMSTestInFreeList) {
  8464             assert(_sp->verify_chunk_in_free_list(ffc),
  8465               "free range is not in free lists");
  8466           }
  8467           _sp->removeFreeChunkFromFreeLists(ffc);
  8468           set_freeRangeInFreeLists(false);
  8469         }
  8470       }
  8471     }
  8472     // Note that if the chunk is not coalescable (the else arm
  8473     // below), we unconditionally flush, without needing to do
  8474     // a "lookahead," as we do below.
  8475     if (inFreeRange()) lookahead_and_flush(fc, size);
  8476   } else {
  8477     // Code path common to both original and adaptive free lists.
  8479   // can't coalesce with previous block; this should be treated
  8480     // as the end of a free run if any
  8481     if (inFreeRange()) {
  8482       // we kicked some butt; time to pick up the garbage
  8483       assert(freeFinger() < addr, "freeFinger points too high");
  8484       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  8486     // else, nothing to do, just continue
  8487   }
  8488 }
  8490 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
  8491   // This is a chunk of garbage.  It is not in any free list.
  8492   // Add it to a free list or let it possibly be coalesced into
  8493   // a larger chunk.
  8494   HeapWord* const addr = (HeapWord*) fc;
  8495   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
  8497   if (_sp->adaptive_freelists()) {
  8498     // Verify that the bit map has no bits marked between
  8499     // addr and purported end of just dead object.
  8500     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  8502     do_post_free_or_garbage_chunk(fc, size);
  8503   } else {
  8504     if (!inFreeRange()) {
  8505       // start of a new free range
  8506       assert(size > 0, "A free range should have a size");
  8507       initialize_free_range(addr, false);
  8508     } else {
  8509       // this will be swept up when we hit the end of the
  8510       // free range
  8511       if (CMSTraceSweeper) {
  8512         gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
  8513       }
  8514       // If the chunk is being coalesced and the current free range is
  8515       // in the free lists, remove the current free range so that it
  8516       // will be returned to the free lists in its entirety - all
  8517       // the coalesced pieces included.
  8518       if (freeRangeInFreeLists()) {
  8519         FreeChunk* ffc = (FreeChunk*)freeFinger();
  8520         assert(ffc->size() == pointer_delta(addr, freeFinger()),
  8521           "Size of free range is inconsistent with chunk size.");
  8522         if (CMSTestInFreeList) {
  8523           assert(_sp->verify_chunk_in_free_list(ffc),
  8524             "free range is not in free lists");
  8525         }
  8526         _sp->removeFreeChunkFromFreeLists(ffc);
  8527         set_freeRangeInFreeLists(false);
  8528       }
  8529       set_lastFreeRangeCoalesced(true);
  8530     }
  8531     // this will be swept up when we hit the end of the free range
  8533     // Verify that the bit map has no bits marked between
  8534     // addr and purported end of just dead object.
  8535     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  8536   }
  8537   assert(_limit >= addr + size,
  8538          "A freshly garbage chunk can't possibly straddle over _limit");
  8539   if (inFreeRange()) lookahead_and_flush(fc, size);
  8540   return size;
  8541 }
  8543 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
  8544   HeapWord* addr = (HeapWord*) fc;
  8545   // The sweeper has just found a live object. Return any accumulated
  8546   // left hand chunk to the free lists.
  8547   if (inFreeRange()) {
  8548     assert(freeFinger() < addr, "freeFinger points too high");
  8549     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  8550   }
  8552   // This object is live: we'd normally expect this to be
  8553   // an oop, and like to assert the following:
  8554   // assert(oop(addr)->is_oop(), "live block should be an oop");
  8555   // However, as we commented above, this may be an object whose
  8556   // header hasn't yet been initialized.
  8557   size_t size;
  8558   assert(_bitMap->isMarked(addr), "Tautology for this control point");
  8559   if (_bitMap->isMarked(addr + 1)) {
  8560     // Determine the size from the bit map, rather than trying to
  8561     // compute it from the object header.
  8562     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
  8563     size = pointer_delta(nextOneAddr + 1, addr);
  8564     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  8565            "alignment problem");
  8567 #ifdef ASSERT
  8568       if (oop(addr)->klass_or_null() != NULL) {
  8569         // Ignore mark word because we are running concurrent with mutators
  8570         assert(oop(addr)->is_oop(true), "live block should be an oop");
  8571         assert(size ==
  8572                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
  8573                "P-mark and computed size do not agree");
  8574       }
  8575 #endif
  8577   } else {
  8578     // This should be an initialized object that's alive.
  8579     assert(oop(addr)->klass_or_null() != NULL,
  8580            "Should be an initialized object");
  8581     // Ignore mark word because we are running concurrent with mutators
  8582     assert(oop(addr)->is_oop(true), "live block should be an oop");
  8583     // Verify that the bit map has no bits marked between
  8584     // addr and purported end of this block.
  8585     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
  8586     assert(size >= 3, "Necessary for Printezis marks to work");
  8587     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
  8588     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
  8589   }
  8590   return size;
  8591 }
  8593 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
  8594                                                  size_t chunkSize) {
  8595   // do_post_free_or_garbage_chunk() should only be called in the case
  8596   // of the adaptive free list allocator.
  8597   const bool fcInFreeLists = fc->is_free();
  8598   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
  8599   assert((HeapWord*)fc <= _limit, "sweep invariant");
  8600   if (CMSTestInFreeList && fcInFreeLists) {
  8601     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
  8602   }
  8604   if (CMSTraceSweeper) {
  8605     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
  8606   }
  8608   HeapWord* const fc_addr = (HeapWord*) fc;
  8610   bool coalesce;
  8611   const size_t left  = pointer_delta(fc_addr, freeFinger());
  8612   const size_t right = chunkSize;
  8613   switch (FLSCoalescePolicy) {
  8614     // numeric value forms a coalition aggressiveness metric
  8615     case 0:  { // never coalesce
  8616       coalesce = false;
  8617       break;
  8618     }
  8619     case 1: { // coalesce if left & right chunks on overpopulated lists
  8620       coalesce = _sp->coalOverPopulated(left) &&
  8621                  _sp->coalOverPopulated(right);
  8622       break;
  8623     }
  8624     case 2: { // coalesce if left chunk on overpopulated list (default)
  8625       coalesce = _sp->coalOverPopulated(left);
  8626       break;
  8627     }
  8628     case 3: { // coalesce if left OR right chunk on overpopulated list
  8629       coalesce = _sp->coalOverPopulated(left) ||
  8630                  _sp->coalOverPopulated(right);
  8631       break;
  8632     }
  8633     case 4: { // always coalesce
  8634       coalesce = true;
  8635       break;
  8636     }
  8637     default:
  8638      ShouldNotReachHere();
  8639   }
  8641   // Should the current free range be coalesced?
  8642   // If the chunk is in a free range and either we decided to coalesce above
  8643   // or the chunk is near the large block at the end of the heap
  8644   // (isNearLargestChunk() returns true), then coalesce this chunk.
  8645   const bool doCoalesce = inFreeRange()
  8646                           && (coalesce || _g->isNearLargestChunk(fc_addr));
  8647   if (doCoalesce) {
  8648     // Coalesce the current free range on the left with the new
  8649     // chunk on the right.  If either is on a free list,
  8650     // it must be removed from the list and stashed in the closure.
  8651     if (freeRangeInFreeLists()) {
  8652       FreeChunk* const ffc = (FreeChunk*)freeFinger();
  8653       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
  8654         "Size of free range is inconsistent with chunk size.");
  8655       if (CMSTestInFreeList) {
  8656         assert(_sp->verify_chunk_in_free_list(ffc),
  8657           "Chunk is not in free lists");
  8658       }
  8659       _sp->coalDeath(ffc->size());
  8660       _sp->removeFreeChunkFromFreeLists(ffc);
  8661       set_freeRangeInFreeLists(false);
  8662     }
  8663     if (fcInFreeLists) {
  8664       _sp->coalDeath(chunkSize);
  8665       assert(fc->size() == chunkSize,
  8666         "The chunk has the wrong size or is not in the free lists");
  8667       _sp->removeFreeChunkFromFreeLists(fc);
  8668     }
  8669     set_lastFreeRangeCoalesced(true);
  8670     print_free_block_coalesced(fc);
  8671   } else {  // not in a free range and/or should not coalesce
  8672     // Return the current free range and start a new one.
  8673     if (inFreeRange()) {
  8674       // In a free range but cannot coalesce with the right hand chunk.
  8675       // Put the current free range into the free lists.
  8676       flush_cur_free_chunk(freeFinger(),
  8677                            pointer_delta(fc_addr, freeFinger()));
  8678     }
  8679     // Set up for new free range.  Pass along whether the right hand
  8680     // chunk is in the free lists.
  8681     initialize_free_range((HeapWord*)fc, fcInFreeLists);
  8682   }
  8683 }
  8685 // Lookahead flush:
  8686 // If we are tracking a free range, and this is the last chunk that
  8687 // we'll look at because its end crosses past _limit, we'll preemptively
  8688 // flush it along with any free range we may be holding on to. Note that
  8689 // this can be the case only for an already free or freshly garbage
  8690 // chunk. If this block is an object, it can never straddle
  8691 // over _limit. The "straddling" occurs when _limit is set at
  8692 // the previous end of the space when this cycle started, and
  8693 // a subsequent heap expansion caused the previously co-terminal
  8694 // free block to be coalesced with the newly expanded portion,
  8695 // thus rendering _limit a non-block-boundary making it dangerous
  8696 // for the sweeper to step over and examine.
  8697 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
  8698   assert(inFreeRange(), "Should only be called if currently in a free range.");
  8699   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
  8700   assert(_sp->used_region().contains(eob - 1),
  8701          err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
  8702                  " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
  8703                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
  8704                  eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
  8705   if (eob >= _limit) {
  8706     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
  8707     if (CMSTraceSweeper) {
  8708       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
  8709                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
  8710                              "[" PTR_FORMAT "," PTR_FORMAT ")",
  8711                              _limit, fc, eob, _sp->bottom(), _sp->end());
  8712     }
  8713     // Return the storage we are tracking back into the free lists.
  8714     if (CMSTraceSweeper) {
  8715       gclog_or_tty->print_cr("Flushing ... ");
  8716     }
  8717     assert(freeFinger() < eob, "Error");
  8718     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
  8719   }
  8720 }
  8722 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
  8723   assert(inFreeRange(), "Should only be called if currently in a free range.");
  8724   assert(size > 0,
  8725     "A zero sized chunk cannot be added to the free lists.");
  8726   if (!freeRangeInFreeLists()) {
  8727     if (CMSTestInFreeList) {
  8728       FreeChunk* fc = (FreeChunk*) chunk;
  8729       fc->set_size(size);
  8730       assert(!_sp->verify_chunk_in_free_list(fc),
  8731         "chunk should not be in free lists yet");
  8732     }
  8733     if (CMSTraceSweeper) {
  8734       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
  8735                     chunk, size);
  8736     }
  8737     // A new free range is going to be starting.  The current
  8738     // free range has not been added to the free lists yet or
  8739     // was removed so add it back.
  8740     // If the current free range was coalesced, then the death
  8741     // of the free range was recorded.  Record a birth now.
  8742     if (lastFreeRangeCoalesced()) {
  8743       _sp->coalBirth(size);
  8744     }
  8745     _sp->addChunkAndRepairOffsetTable(chunk, size,
  8746             lastFreeRangeCoalesced());
  8747   } else if (CMSTraceSweeper) {
  8748     gclog_or_tty->print_cr("Already in free list: nothing to flush");
  8749   }
  8750   set_inFreeRange(false);
  8751   set_freeRangeInFreeLists(false);
  8752 }
  8754 // We take a break if we've been at this for a while,
  8755 // so as to avoid monopolizing the locks involved.
  8756 void SweepClosure::do_yield_work(HeapWord* addr) {
  8757   // Return current free chunk being used for coalescing (if any)
  8758   // to the appropriate freelist.  After yielding, the next
  8759   // free block encountered will start a coalescing range of
  8760   // free blocks.  If the next free block is adjacent to the
  8761   // chunk just flushed, they will need to wait for the next
  8762   // sweep to be coalesced.
  8763   if (inFreeRange()) {
  8764     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  8765   }
  8767   // First give up the locks, then yield, then re-lock.
  8768   // We should probably use a constructor/destructor idiom to
  8769   // do this unlock/lock or modify the MutexUnlocker class to
  8770   // serve our purpose. XXX
  8771   assert_lock_strong(_bitMap->lock());
  8772   assert_lock_strong(_freelistLock);
  8773   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  8774          "CMS thread should hold CMS token");
  8775   _bitMap->lock()->unlock();
  8776   _freelistLock->unlock();
  8777   ConcurrentMarkSweepThread::desynchronize(true);
  8778   ConcurrentMarkSweepThread::acknowledge_yield_request();
  8779   _collector->stopTimer();
  8780   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  8781   if (PrintCMSStatistics != 0) {
  8782     _collector->incrementYields();
  8783   }
  8784   _collector->icms_wait();
  8786   // See the comment in coordinator_yield()
  8787   for (unsigned i = 0; i < CMSYieldSleepCount &&
  8788                        ConcurrentMarkSweepThread::should_yield() &&
  8789                        !CMSCollector::foregroundGCIsActive(); ++i) {
  8790     os::sleep(Thread::current(), 1, false);
  8791     ConcurrentMarkSweepThread::acknowledge_yield_request();
  8792   }
  8794   ConcurrentMarkSweepThread::synchronize(true);
  8795   _freelistLock->lock();
  8796   _bitMap->lock()->lock_without_safepoint_check();
  8797   _collector->startTimer();
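
// A minimal sketch of the constructor/destructor idiom suggested in the
// XXX comment above (hypothetical; ReleasedLock is not an existing
// HotSpot class). The constructor gives up the lock and the destructor
// reacquires it, so no exit path from the yield code could forget the
// re-lock:
//
//   class ReleasedLock : public StackObj {
//     Mutex* const _mutex;
//    public:
//     ReleasedLock(Mutex* m) : _mutex(m) { _mutex->unlock(); }
//     ~ReleasedLock()                    { _mutex->lock();   }
//   };
//
// do_yield_work() would declare one such local for the bitmap lock and
// then one for the freelist lock; destruction in reverse order restores
// the freelist lock before the bitmap lock, matching the explicit code
// above. (The bitmap lock would additionally want the
// lock_without_safepoint_check() variant on reacquisition.)
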
#ifndef PRODUCT
// This is actually very useful in a product build if it can
// be called from the debugger.  Compile it into the product
// as needed.
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
  return debug_cms_space->verify_chunk_in_free_list(fc);
}
#endif

void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
                           fc, fc->size());
  }
}

// CMSIsAliveClosure
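// An object is considered live if it lies outside the span being
// collected (we have no liveness information for it, so we must assume
// it is live) or if it has been marked in the bit map.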
bool CMSIsAliveClosure::do_object_b(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  return addr != NULL &&
         (!_span.contains(addr) || _bit_map->isMarked(addr));
}
CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
                      MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc):
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _concurrent_precleaning(cpc) {
  assert(!_span.is_empty(), "Empty span could spell trouble");
}
// CMSKeepAliveClosure: the serial version
void CMSKeepAliveClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    _bit_map->mark(addr);
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
        // We dirty the overflown object and let the remark
        // phase deal with it.
        assert(_collector->overflow_list_is_empty(), "Error");
        // In the case of object arrays, we need to dirty all of
        // the cards that the object spans. No locking or atomics
        // are needed since no one else can be mutating the mod union
        // table.
        if (obj->is_objArray()) {
          size_t sz = obj->size();
          HeapWord* end_card_addr =
            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          _collector->_modUnionTable.mark_range(redirty_range);
        } else {
          _collector->_modUnionTable.mark(addr);
        }
        _collector->_ser_kac_preclean_ovflw++;
      } else {
        _collector->push_on_overflow_list(obj);
        _collector->_ser_kac_ovflw++;
      }
    }
  }
}

void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
// CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but (may be) available for stealing by other threads.
void CMSParKeepAliveClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    // In general, during recursive tracing, several threads
    // may be concurrently getting here; the first one to
    // "tag" it, claims it.
    if (_bit_map->par_mark(addr)) {
      bool res = _work_queue->push(obj);
      assert(res, "Low water mark should be much less than capacity");
      // Do a recursive trim in the hope that this will keep
      // stack usage lower, but leave some oops for potential stealers
      trim_queue(_low_water_mark);
    } // Else, another thread got there first
  }
}

void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
void CMSParKeepAliveClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}
CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
                                CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue):
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue) { }

void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) &&
      !_bit_map->isMarked(addr)) {
    if (_bit_map->par_mark(addr)) {
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(obj)) {
        _collector->par_push_on_overflow_list(obj);
        _collector->_par_kac_ovflw++;
      }
    } // Else another thread got there already
  }
}

void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
//////////////////////////////////////////////////////////////////
//  CMSExpansionCause                /////////////////////////////
//////////////////////////////////////////////////////////////////
const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
  switch (cause) {
    case _no_expansion:
      return "No expansion";
    case _satisfy_free_ratio:
      return "Free ratio";
    case _satisfy_promotion:
      return "Satisfy promotion";
    case _satisfy_allocation:
      return "Allocation";
    case _allocate_par_lab:
      return "Par LAB";
    case _allocate_par_spooling_space:
      return "Par Spooling Space";
    case _adaptive_size_policy:
      return "Ergonomics";
    default:
      return "unknown";
  }
}
void CMSDrainMarkingStackClosure::do_void() {
  // the max number to take from overflow list at a time
  const size_t num = _mark_stack->capacity()/4;
  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
         "Overflow list should be NULL during concurrent phases");
  while (!_mark_stack->isEmpty() ||
         // if stack is empty, check the overflow list
         _collector->take_from_overflow_list(num, _mark_stack)) {
    oop obj = _mark_stack->pop();
    HeapWord* addr = (HeapWord*)obj;
    assert(_span.contains(addr), "Should be within span");
    assert(_bit_map->isMarked(addr), "Should be marked");
    assert(obj->is_oop(), "Should be an oop");
    obj->oop_iterate(_keep_alive);
  }
}
void CMSParDrainMarkingStackClosure::do_void() {
  // drain queue
  trim_queue(0);
}

// Trim our work_queue so its length is below max at return
void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop->is_oop(), "Expected an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop),
             "no white objects on this stack!");
      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
      // iterate over the oops in this oop, marking and pushing
      // the ones in CMS heap (i.e. in _span).
      new_oop->oop_iterate(&_mark_and_push);
    }
  }
}
////////////////////////////////////////////////////////////////////
// Support for Marking Stack Overflow list handling and related code
////////////////////////////////////////////////////////////////////
// Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try to share that code
// as much as possible in the future.
#ifndef PRODUCT
// Debugging support for CMSMarkStackOverflowALot

// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
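// For example, with CMSMarkStackOverflowInterval == 1000, roughly one
// call in a thousand will report a simulated overflow.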
bool CMSCollector::simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = CMSMarkStackOverflowInterval;
    return true;
  } else {
    return false;
  }
}

bool CMSCollector::par_simulate_overflow() {
  return simulate_overflow();
}
#endif
// Single-threaded
bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  assert(stack->isEmpty(), "Expected precondition");
  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  size_t i = num;
  oop  cur = _overflow_list;
  const markOop proto = markOopDesc::prototype();
  NOT_PRODUCT(ssize_t n = 0;)
  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = stack->push(cur);
    assert(res, "Bit off more than can chew?");
    NOT_PRODUCT(n++;)
  }
  _overflow_list = cur;
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
#endif
  return !stack->isEmpty();
}
#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
// (MT-safe) Get a prefix of at most "num" from the list.
// The overflow list is chained through the mark word of
// each object in the list. We fetch the entire list,
// break off a prefix of the right size and return the
// remainder. If other threads try to take objects from
// the overflow list at that time, they will wait for
// some time to see if data becomes available. If (and
// only if) another thread places one or more object(s)
// on the global list before we have returned the suffix
// to the global list, we will walk down our local list
// to find its end and append the global list to
// our suffix before returning it. This suffix walk can
// prove to be expensive (quadratic in the amount of traffic)
// when there are many objects in the overflow list and
// there is much producer-consumer contention on the list.
// *NOTE*: The overflow list manipulation code here and
// in ParNewGeneration:: are very similar in shape,
// except that in the ParNew case we use the old (from/eden)
// copy of the object to thread the list via its klass word.
// Because of the common code, if you make any changes in
// the code below, please check the ParNew version to see if
// similar changes might be needed.
// CR 6797058 has been filed to consolidate the common code.
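//
// As a concrete illustration, suppose objects A -> B -> C -> D are
// chained through their mark words (D's mark is NULL, terminating the
// list):
//   _overflow_list -> A -> B -> C -> D
// A thread asking for num == 2 first xchg's BUSY into _overflow_list,
// keeps the prefix [A, B], and tries to cmpxchg C back as the new head
// (expecting to still see BUSY there). If some other thread pushed E in
// the meantime, that cmpxchg fails and the suffix [C, D] is instead
// spliced in front of the observed list by pointing D's mark at E.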
bool CMSCollector::par_take_from_overflow_list(size_t num,
                                               OopTaskQueue* work_q,
                                               int no_of_gc_threads) {
  assert(work_q->size() == 0, "First empty local work queue");
  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
  if (_overflow_list == NULL) {
    return false;
  }
  // Grab the entire list; we'll put back a suffix
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  Thread* tid = Thread::current();
  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
  // set to ParallelGCThreads.
  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, num/100);
  // If the list is busy, we spin for a short while,
  // sleeping between attempts to get the list.
  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // Nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // Try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  // If the list was found to be empty, or we spun long
  // enough, we give up and return empty-handed. If we leave
  // the list in the BUSY state below, it must be the case that
  // some other thread holds the overflow list and will set it
  // to a non-BUSY state in the future.
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = num;
  oop cur = prefix;
  // Walk down the first "num" objects, unless we reach the end.
  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
  if (cur->mark() == NULL) {
    // We have "num" or fewer elements in the list, so there
    // is nothing to return to the global list.
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    // Chop off the suffix and return it to the global list.
    assert(cur->mark() != BUSY, "Error");
    oop suffix_head = cur->mark(); // suffix will be put back on global list
    cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty (or BUSY) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix without incurring the cost
    // of a walk down the list.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else sneaked in (at least) an element; we'll need
      // to do a splice. Find tail of suffix so we can prepend suffix to global
      // list.
      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
      oop suffix_tail = cur;
      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
             "Tautology");
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          suffix_tail->set_mark(markOop(cur_overflow_list));
        } else { // cur_overflow_list == BUSY
          suffix_tail->set_mark(NULL);
        }
        // ... and try to place spliced list back on overflow_list ...
        observed_overflow_list =
          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
      // ... until we have succeeded in doing so.
    }
  }
  // Push the prefix elements on work_q
  assert(prefix != NULL, "control point invariant");
  const markOop proto = markOopDesc::prototype();
  oop next;
  NOT_PRODUCT(ssize_t n = 0;)
  for (cur = prefix; cur != NULL; cur = next) {
    next = oop(cur->mark());
    cur->set_mark(proto);   // until proven otherwise
    assert(cur->is_oop(), "Should be an oop");
    bool res = work_q->push(cur);
    assert(res, "Bit off more than we can chew?");
    NOT_PRODUCT(n++;)
  }
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
// Single-threaded
void CMSCollector::push_on_overflow_list(oop p) {
  NOT_PRODUCT(_num_par_pushes++;)
  assert(p->is_oop(), "Not an oop");
  preserve_mark_if_necessary(p);
  p->set_mark((markOop)_overflow_list);
  _overflow_list = p;
}
// Multi-threaded; use CAS to prepend to overflow list
void CMSCollector::par_push_on_overflow_list(oop p) {
  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
  assert(p->is_oop(), "Not an oop");
  par_preserve_mark_if_necessary(p);
  oop observed_overflow_list = _overflow_list;
  oop cur_overflow_list;
  do {
    cur_overflow_list = observed_overflow_list;
    if (cur_overflow_list != BUSY) {
      p->set_mark(markOop(cur_overflow_list));
    } else {
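      // The head is momentarily BUSY: some other thread owns the entire
      // list. Set p's mark to NULL rather than BUSY so the sentinel is
      // never threaded into the chain; if the cmpxchg below succeeds,
      // p becomes a one-element list, and the thread that took the list
      // will prepend its suffix, leaving p linked behind it.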
      p->set_mark(NULL);
    }
    observed_overflow_list =
      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
  } while (cur_overflow_list != observed_overflow_list);
}
#undef BUSY

// Single threaded
// General Note on GrowableArray: pushes may silently fail
// because we are (temporarily) out of C-heap for expanding
// the stack. The problem is quite ubiquitous and affects
// a lot of code in the JVM. The prudent thing for GrowableArray
// to do (for now) is to exit with an error. However, that may
// be too draconian in some cases because the caller may be
// able to recover without much harm. For such cases, we
// should probably introduce a "soft_push" method which returns
// an indication of success or failure with the assumption that
// the caller may be able to recover from a failure; code in
// the VM can then be changed, incrementally, to deal with such
// failures where possible, thus, incrementally hardening the VM
// in such low resource situations.
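//
// A sketch of that "soft_push" idea (hypothetical; neither soft_push
// nor try_grow exists in GrowableArray today): the push would report
// failure instead of exiting when the backing store cannot be expanded,
// and callers able to recover would check the result:
//
//   template <class E> bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;   // out of C-heap; let the caller recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }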
void CMSCollector::preserve_mark_work(oop p, markOop m) {
  _preserved_oop_stack.push(p);
  _preserved_mark_stack.push(m);
  assert(m == p->mark(), "Mark word changed");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");
}
// Single threaded
void CMSCollector::preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    preserve_mark_work(p, m);
  }
}

void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  markOop m = p->mark();
  if (m->must_be_preserved(p)) {
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    // Even though we read the mark word without holding
    // the lock, we are assured that it will not change
    // because we "own" this oop, so no other thread can
    // be trying to push it on the overflow list; see
    // the assertion in preserve_mark_work() that checks
    // that m == p->mark().
    preserve_mark_work(p, m);
  }
}
// We should be able to do this multi-threaded,
// a chunk of stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so it will generally not be done
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
// We can explore cleverer concurrent/overlapped/parallel
// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be so rare in practice, and its effect on performance
// when it does happen so great, that this will
// likely just be in the noise anyway.
void CMSCollector::restore_preserved_marks_if_any() {
  assert(SafepointSynchronize::is_at_safepoint(),
         "world should be stopped");
  assert(Thread::current()->is_ConcurrentGC_thread() ||
         Thread::current()->is_VM_thread(),
         "should be single-threaded");
  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
         "bijection");

  while (!_preserved_oop_stack.is_empty()) {
    oop p = _preserved_oop_stack.pop();
    assert(p->is_oop(), "Should be an oop");
    assert(_span.contains(p), "oop should be in _span");
    assert(p->mark() == markOopDesc::prototype(),
           "Set when taken from overflow list");
    markOop m = _preserved_mark_stack.pop();
    p->set_mark(m);
  }
  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
         "stacks were cleared above");
}

#ifndef PRODUCT
bool CMSCollector::no_preserved_marks() const {
  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
}
#endif
CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
{
  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* size_policy =
    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
  assert(size_policy->is_gc_cms_adaptive_size_policy(),
         "Wrong type for size policy");
  return size_policy;
}
void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
                                             size_t desired_promo_size) {
  if (cur_promo_size < desired_promo_size) {
    size_t expand_bytes = desired_promo_size - cur_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
        expand_bytes);
    }
    expand(expand_bytes,
           MinHeapDeltaBytes,
           CMSExpansionCause::_adaptive_size_policy);
  } else if (desired_promo_size < cur_promo_size) {
    size_t shrink_bytes = cur_promo_size - desired_promo_size;
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
        shrink_bytes);
    }
    shrink(shrink_bytes);
  }
}
CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSGCAdaptivePolicyCounters* counters =
    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
         "Wrong kind of counters");
  return counters;
}
void ASConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}

void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();

    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
           "Wrong gc statistics type");
    counters->update_counters(gc_stats_l);
  }
}
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  assert_lock_strong(freelistLock());
  HeapWord* old_end = _cmsSpace->end();
  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
  FreeChunk* chunk_at_end = find_chunk_at_end();
  if (chunk_at_end == NULL) {
    // No room to shrink
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("No room to shrink: old_end  "
        PTR_FORMAT "  unallocated_start  " PTR_FORMAT
        " chunk_at_end  " PTR_FORMAT,
        old_end, unallocated_start, chunk_at_end);
    }
    return;
  } else {

    // Find the chunk at the end of the space and determine
    // how much it can be shrunk.
    size_t shrinkable_size_in_bytes = chunk_at_end->size();
    size_t aligned_shrinkable_size_in_bytes =
      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
           "Inconsistent chunk at end of space");
    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
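    // For example, a desired_bytes of 1M against a page-aligned
    // shrinkable size of 3M yields bytes == 1M, while a request for 4M
    // would be clipped to the 3M the end chunk can actually give up.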
    size_t word_size_before = heap_word_size(_virtual_space.committed_size());

    // Shrink the underlying space
    _virtual_space.shrink_by(bytes);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
        " desired_bytes " SIZE_FORMAT
        " shrinkable_size_in_bytes " SIZE_FORMAT
        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
        "  bytes  " SIZE_FORMAT,
        desired_bytes, shrinkable_size_in_bytes,
        aligned_shrinkable_size_in_bytes, bytes);
      gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
        "  unallocated_start  " PTR_FORMAT,
        old_end, unallocated_start);
    }

    // If the space did shrink (shrinking is not guaranteed),
    // shrink the chunk at the end by the appropriate amount.
    if (((HeapWord*)_virtual_space.high()) < old_end) {
      size_t new_word_size =
        heap_word_size(_virtual_space.committed_size());

      // Have to remove the chunk from the dictionary because it is changing
      // size and might be someplace elsewhere in the dictionary.

      // Get the chunk at end, shrink it, and put it
      // back.
      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
      size_t word_size_change = word_size_before - new_word_size;
      size_t chunk_at_end_old_size = chunk_at_end->size();
      assert(chunk_at_end_old_size >= word_size_change,
             "Shrink is too large");
      chunk_at_end->set_size(chunk_at_end_old_size -
                             word_size_change);
      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
                       word_size_change);

      _cmsSpace->returnChunkToDictionary(chunk_at_end);

      MemRegion mr(_cmsSpace->bottom(), new_word_size);
      _bts->resize(new_word_size);  // resize the block offset shared array
      Universe::heap()->barrier_set()->resize_covered_region(mr);
      _cmsSpace->assert_locked();
      _cmsSpace->set_end((HeapWord*)_virtual_space.high());

      NOT_PRODUCT(_cmsSpace->dictionary()->verify());

      // update the space and generation capacity counters
      if (UsePerfData) {
        _space_counters->update_capacity();
        _gen_counters->update_all();
      }

      if (Verbose && PrintGCDetails) {
        size_t new_mem_size = _virtual_space.committed_size();
        size_t old_mem_size = new_mem_size + bytes;
        gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                               name(), old_mem_size/K, bytes/K, new_mem_size/K);
      }
    }

    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
           "Inconsistency at end of space");
    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
           "Shrinking is inconsistent");
    return;
  }
}
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
                    (size_t)ParGCDesiredObjsFromOverflowList);
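  // For example, with a mark stack of capacity 4096 currently holding
  // 1024 oops, the first term is (4096 - 1024)/4 == 768, which is then
  // clipped to ParGCDesiredObjsFromOverflowList; only a modest batch is
  // taken per call.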

  bool res = _collector->take_from_overflow_list(num, _mark_stack);
  assert(_collector->overflow_list_is_empty() || res,
         "If list is not empty, we should have taken something");
  assert(!res || !_mark_stack->isEmpty(),
         "If we took something, it should now be on our stack");
  return res;
}
size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
  size_t res = _sp->block_size_no_stall(addr, _collector);
  if (_sp->block_is_obj(addr)) {
    if (_live_bit_map->isMarked(addr)) {
      // It can't have been dead in a previous cycle
      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
    } else {
      _dead_bit_map->mark(addr);      // mark the dead object
    }
  }
  // Could be 0, if the block size could not be computed without stalling.
  return res;
}
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {

  switch (phase) {
    case CMSCollector::InitialMarking:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 true  /* recordGCBeginTime */,
                 true  /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
                 true  /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */);
      break;

    case CMSCollector::FinalMarking:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
                 true  /* recordAccumulatedGCTime */,
                 false /* recordGCEndTime */,
                 false /* countCollection */);
      break;

    case CMSCollector::Sweeping:
      initialize(true  /* fullGC */,
                 cause /* cause of the GC */,
                 false /* recordGCBeginTime */,
                 false /* recordPreGCUsage */,
                 true  /* recordPeakUsage */,
                 true  /* recordPostGCUsage */,
                 false /* recordAccumulatedGCTime */,
                 true  /* recordGCEndTime */,
                 true  /* countCollection */);
      break;

    default:
      ShouldNotReachHere();
  }
}
