src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

author:      coleenp
date:        Sun, 13 Apr 2008 17:43:42 -0400
changeset:   548:ba764ed4b6f2
parent:      529:0834225a7916
child:       578:b5489bb705c9
permissions: -rw-r--r--

6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold

     1 /*
     2  * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_concurrentMarkSweepGeneration.cpp.incl"
    28 // statics
    29 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
    30 bool          CMSCollector::_full_gc_requested          = false;
    32 //////////////////////////////////////////////////////////////////
    33 // In support of CMS/VM thread synchronization
    34 //////////////////////////////////////////////////////////////////
    35 // We split use of the CGC_lock into 2 "levels".
    36 // The low-level locking is of the usual CGC_lock monitor. We introduce
    37 // a higher level "token" (hereafter "CMS token") built on top of the
    38 // low level monitor (hereafter "CGC lock").
    39 // The token-passing protocol gives priority to the VM thread. The
    40 // CMS-lock doesn't provide any fairness guarantees, but clients
    41 // should ensure that it is only held for very short, bounded
    42 // durations.
    43 //
    44 // When either of the CMS thread or the VM thread is involved in
    45 // collection operations during which it does not want the other
    46 // thread to interfere, it obtains the CMS token.
    47 //
    48 // If either thread tries to get the token while the other has
    49 // it, that thread waits. However, if the VM thread and CMS thread
    50 // both want the token, then the VM thread gets priority while the
    51 // CMS thread waits. This ensures, for instance, that the "concurrent"
    52 // phases of the CMS thread's work do not block out the VM thread
    53 // for long periods of time as the CMS thread continues to hog
    54 // the token. (See bug 4616232).
    55 //
    56 // The baton-passing functions are, however, controlled by the
    57 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
    58 // and here the low-level CMS lock, not the high level token,
    59 // ensures mutual exclusion.
    60 //
    61 // Two important conditions that we have to satisfy:
    62 // 1. if a thread does a low-level wait on the CMS lock, then it
    63 //    relinquishes the CMS token if it were holding that token
    64 //    when it acquired the low-level CMS lock.
    65 // 2. any low-level notifications on the low-level lock
    66 //    should only be sent when a thread has relinquished the token.
    67 //
    68 // In the absence of either property, we'd have potential deadlock.
    69 //
    70 // We protect each of the CMS (concurrent and sequential) phases
    71 // with the CMS _token_, not the CMS _lock_.
    72 //
    73 // The only code protected by CMS lock is the token acquisition code
    74 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
    75 // baton-passing code.
    76 //
    77 // Unfortunately, I couldn't come up with a good abstraction to factor and
    78 // hide the naked CGC_lock manipulation in the baton-passing code
    79 // further below. That's something we should try to do. Also, the proof
    80 // of correctness of this 2-level locking scheme is far from obvious,
    81 // and potentially quite slippery. We have an uneasy suspicion, for instance,
    82 // that there may be a theoretical possibility of delay/starvation in the
    83 // low-level lock/wait/notify scheme used for the baton-passing because of
    84 // potential interference with the priority scheme embodied in the
    85 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
    86 // invocation further below and marked with "XXX 20011219YSR".
    87 // Indeed, as we note elsewhere, this may become yet more slippery
    88 // in the presence of multiple CMS and/or multiple VM threads. XXX
    90 class CMSTokenSync: public StackObj {
    91  private:
    92   bool _is_cms_thread;
    93  public:
    94   CMSTokenSync(bool is_cms_thread):
    95     _is_cms_thread(is_cms_thread) {
    96     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
    97            "Incorrect argument to constructor");
    98     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
    99   }
   101   ~CMSTokenSync() {
   102     assert(_is_cms_thread ?
   103              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
   104              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
   105           "Incorrect state");
   106     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
   107   }
   108 };
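// Illustrative usage sketch (not part of the original source): the token is
// held for the dynamic extent of a stack-allocated CMSTokenSync, e.g. on the
// CMS thread:
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */); // acquire token; may wait for the VM thread
//     ... work that must not interleave with the VM thread ...
//   } // destructor releases the token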
   110 // Convenience class that does a CMSTokenSync, and then acquires
   111 // up to three locks.
   112 class CMSTokenSyncWithLocks: public CMSTokenSync {
   113  private:
   114   // Note: locks are acquired in textual declaration order
   115   // and released in the opposite order
   116   MutexLockerEx _locker1, _locker2, _locker3;
   117  public:
   118   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
   119                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
   120     CMSTokenSync(is_cms_thread),
   121     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
   122     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
   123     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
   124   { }
   125 };
   128 // Wrapper class to temporarily disable icms during a foreground cms collection.
   129 class ICMSDisabler: public StackObj {
   130  public:
   131   // The ctor disables icms and wakes up the thread so it notices the change;
   132   // the dtor re-enables icms.  Note that the CMSCollector methods will check
   133   // CMSIncrementalMode.
   134   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
   135   ~ICMSDisabler() { CMSCollector::enable_icms(); }
   136 };
   138 //////////////////////////////////////////////////////////////////
   139 //  Concurrent Mark-Sweep Generation /////////////////////////////
   140 //////////////////////////////////////////////////////////////////
   142 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
   144 // This struct contains per-thread things necessary to support parallel
   145 // young-gen collection.
   146 class CMSParGCThreadState: public CHeapObj {
   147  public:
   148   CFLS_LAB lab;
   149   PromotionInfo promo;
   151   // Constructor.
   152   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
   153     promo.setSpace(cfls);
   154   }
   155 };
   157 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
   158      ReservedSpace rs, size_t initial_byte_size, int level,
   159      CardTableRS* ct, bool use_adaptive_freelists,
   160      FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
   161   CardGeneration(rs, initial_byte_size, level, ct),
   162   _dilatation_factor(((double)MinChunkSize)/((double)(oopDesc::header_size()))),
   163   _debug_collection_type(Concurrent_collection_type)
   164 {
   165   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   166   HeapWord* end    = (HeapWord*) _virtual_space.high();
   168   _direct_allocated_words = 0;
   169   NOT_PRODUCT(
   170     _numObjectsPromoted = 0;
   171     _numWordsPromoted = 0;
   172     _numObjectsAllocated = 0;
   173     _numWordsAllocated = 0;
   174   )
   176   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
   177                                            use_adaptive_freelists,
   178                                            dictionaryChoice);
   179   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
   180   if (_cmsSpace == NULL) {
   181     vm_exit_during_initialization(
   182       "CompactibleFreeListSpace allocation failure");
   183   }
   184   _cmsSpace->_gen = this;
   186   _gc_stats = new CMSGCStats();
   188   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
   189   // offsets match. The ability to tell free chunks from objects
   190   // depends on this property.
   191   debug_only(
   192     FreeChunk* junk = NULL;
   193     assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
   194            "Offset of FreeChunk::_prev within FreeChunk must match"
   195            "  that of OopDesc::_klass within OopDesc");
   196   )
   197   if (ParallelGCThreads > 0) {
   198     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
   199     _par_gc_thread_states =
   200       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
   201     if (_par_gc_thread_states == NULL) {
   202       vm_exit_during_initialization("Could not allocate par gc structs");
   203     }
   204     for (uint i = 0; i < ParallelGCThreads; i++) {
   205       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
   206       if (_par_gc_thread_states[i] == NULL) {
   207         vm_exit_during_initialization("Could not allocate par gc structs");
   208       }
   209     }
   210   } else {
   211     _par_gc_thread_states = NULL;
   212   }
   213   _incremental_collection_failed = false;
   214   // The "dilatation_factor" is the expansion that can occur on
   215   // account of the fact that the minimum object size in the CMS
   216   // generation may be larger than that in, say, a contiguous young
   217   //  generation.
   218   // Ideally, in the calculation below, we'd compute the dilatation
   219   // factor as: MinChunkSize/(promoting_gen's min object size)
   220   // Since we do not have such a general query interface for the
   221   // promoting generation, we'll instead just use the minimum
   222   // object size (which today is a header's worth of space);
   223   // note that all arithmetic is in units of HeapWords.
   224   assert(MinChunkSize >= oopDesc::header_size(), "just checking");
   225   assert(_dilatation_factor >= 1.0, "from previous assert");
   226 }
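// Worked example (illustrative numbers only, not from the original source):
// if MinChunkSize were 4 HeapWords and oopDesc::header_size() were 2 HeapWords,
// then _dilatation_factor = 4.0 / 2.0 = 2.0, i.e. promotion-space estimates are
// scaled by up to 2x to account for the larger minimum object (chunk) size in
// this generation.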
   229 // The field "_initiating_occupancy" represents the occupancy percentage
   230 // at which we trigger a new collection cycle.  Unless explicitly specified
   231 // via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
   232 // is calculated by:
   233 //
   234 //   Let "f" be MinHeapFreeRatio in
   235 //
   236 //    _initiating_occupancy = 100-f +
   237 //                           f * (CMSTrigger[Perm]Ratio/100)
   238 //   where CMSTrigger[Perm]Ratio is the argument "tr" below.
   239 //
   240 // That is, if we assume the heap is at its desired maximum occupancy at the
   241 // end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
   242 // space be allocated before initiating a new collection cycle.
   243 //
   244 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
   245   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
   246   if (io >= 0) {
   247     _initiating_occupancy = (double)io / 100.0;
   248   } else {
   249     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
   250                              (double)(tr * MinHeapFreeRatio) / 100.0)
   251                             / 100.0;
   252   }
   253 }
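// Worked example (illustrative numbers only): with io < 0 (no explicit
// CMSInitiating[Perm]OccupancyFraction), MinHeapFreeRatio = 40 and tr = 80,
// the else-branch above yields
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100 = 0.92,
// i.e. a new cycle is initiated once the generation is about 92% occupied.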
   256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
   257   assert(collector() != NULL, "no collector");
   258   collector()->ref_processor_init();
   259 }
   261 void CMSCollector::ref_processor_init() {
   262   if (_ref_processor == NULL) {
   263     // Allocate and initialize a reference processor
   264     _ref_processor = ReferenceProcessor::create_ref_processor(
   265         _span,                               // span
   266         _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
   267         _cmsGen->refs_discovery_is_mt(),     // mt_discovery
   268         &_is_alive_closure,
   269         ParallelGCThreads,
   270         ParallelRefProcEnabled);
   271     // Initialize the _ref_processor field of CMSGen
   272     _cmsGen->set_ref_processor(_ref_processor);
   274     // Allocate a dummy ref processor for perm gen.
   275     ReferenceProcessor* rp2 = new ReferenceProcessor();
   276     if (rp2 == NULL) {
   277       vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   278     }
   279     _permGen->set_ref_processor(rp2);
   280   }
   281 }
   283 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
   284   GenCollectedHeap* gch = GenCollectedHeap::heap();
   285   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
   286     "Wrong type of heap");
   287   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
   288     gch->gen_policy()->size_policy();
   289   assert(sp->is_gc_cms_adaptive_size_policy(),
   290     "Wrong type of size policy");
   291   return sp;
   292 }
   294 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
   295   CMSGCAdaptivePolicyCounters* results =
   296     (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
   297   assert(
   298     results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
   299     "Wrong gc policy counter kind");
   300   return results;
   301 }
   304 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
   306   const char* gen_name = "old";
   308   // Generation Counters - generation 1, 1 subspace
   309   _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
   311   _space_counters = new GSpaceCounters(gen_name, 0,
   312                                        _virtual_space.reserved_size(),
   313                                        this, _gen_counters);
   314 }
   316 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
   317   _cms_gen(cms_gen)
   318 {
   319   assert(alpha <= 100, "bad value");
   320   _saved_alpha = alpha;
   322   // Initialize the alphas to the bootstrap value of 100.
   323   _gc0_alpha = _cms_alpha = 100;
   325   _cms_begin_time.update();
   326   _cms_end_time.update();
   328   _gc0_duration = 0.0;
   329   _gc0_period = 0.0;
   330   _gc0_promoted = 0;
   332   _cms_duration = 0.0;
   333   _cms_period = 0.0;
   334   _cms_allocated = 0;
   336   _cms_used_at_gc0_begin = 0;
   337   _cms_used_at_gc0_end = 0;
   338   _allow_duty_cycle_reduction = false;
   339   _valid_bits = 0;
   340   _icms_duty_cycle = CMSIncrementalDutyCycle;
   341 }
   343 // If promotion failure handling is on use
   344 // the padded average size of the promotion for each
   345 // young generation collection.
   346 double CMSStats::time_until_cms_gen_full() const {
   347   size_t cms_free = _cms_gen->cmsSpace()->free();
   348   GenCollectedHeap* gch = GenCollectedHeap::heap();
   349   size_t expected_promotion = gch->get_gen(0)->capacity();
   350   if (HandlePromotionFailure) {
   351     expected_promotion = MIN2(
   352         (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
   353         expected_promotion);
   354   }
   355   if (cms_free > expected_promotion) {
   356     // Start a cms collection if there isn't enough space to promote
   357     // for the next minor collection.  Use the padded average as
   358     // a safety factor.
   359     cms_free -= expected_promotion;
   361     // Adjust by the safety factor.
   362     double cms_free_dbl = (double)cms_free;
   363     cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
   365     if (PrintGCDetails && Verbose) {
   366       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
   367         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
   368         cms_free, expected_promotion);
   369       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
   370         cms_free_dbl, cms_consumption_rate() + 1.0);
   371     }
   372     // Add 1 in case the consumption rate goes to zero.
   373     return cms_free_dbl / (cms_consumption_rate() + 1.0);
   374   }
   375   return 0.0;
   376 }
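// Worked example (illustrative numbers only): if cms_free exceeds the expected
// promotion by 100MB, CMSIncrementalSafetyFactor is 10 and the measured
// cms_consumption_rate() is 10MB/s, the estimate above is roughly
//   (100MB * 0.90) / (10MB/s + 1) ~= 9 seconds
// until the cms generation is expected to be full.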
   378 // Compare the duration of the cms collection to the
   379 // time remaining before the cms generation is empty.
   380 // Note that the time from the start of the cms collection
   381 // to the start of the cms sweep (less than the total
   382 // duration of the cms collection) can be used.  This
   383 // has been tried and some applications experienced
   384 // promotion failures early in execution.  This was
   385 // possibly because the averages were not accurate
   386 // enough at the beginning.
   387 double CMSStats::time_until_cms_start() const {
   388   // We add "gc0_period" to the "work" calculation
   389   // below because this query is done (mostly) at the
   390   // end of a scavenge, so we need to conservatively
   391   // account for that much possible delay
   392   // in the query so as to avoid concurrent mode failures
   393   // due to starting the collection just a wee bit too
   394   // late.
   395   double work = cms_duration() + gc0_period();
   396   double deadline = time_until_cms_gen_full();
   397   if (work > deadline) {
   398     if (Verbose && PrintGCDetails) {
   399       gclog_or_tty->print(
   400         " CMSCollector: collect because of anticipated promotion "
   401         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
   402         gc0_period(), time_until_cms_gen_full());
   403     }
   404     return 0.0;
   405   }
   406   return work - deadline;
   407 }
   409 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
   410 // amount of change to prevent wild oscillation.
   411 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
   412                                               unsigned int new_duty_cycle) {
   413   assert(old_duty_cycle <= 100, "bad input value");
   414   assert(new_duty_cycle <= 100, "bad input value");
   416   // Note:  use subtraction with caution since it may underflow (values are
   417   // unsigned).  Addition is safe since we're in the range 0-100.
   418   unsigned int damped_duty_cycle = new_duty_cycle;
   419   if (new_duty_cycle < old_duty_cycle) {
   420     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
   421     if (new_duty_cycle + largest_delta < old_duty_cycle) {
   422       damped_duty_cycle = old_duty_cycle - largest_delta;
   423     }
   424   } else if (new_duty_cycle > old_duty_cycle) {
   425     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
   426     if (new_duty_cycle > old_duty_cycle + largest_delta) {
   427       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
   428     }
   429   }
   430   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
   432   if (CMSTraceIncrementalPacing) {
   433     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
   434                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
   435   }
   436   return damped_duty_cycle;
   437 }
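// Worked example (illustrative numbers only): damping a drop from
// old_duty_cycle = 60 to new_duty_cycle = 10: largest_delta = MAX2(60/4, 5U) = 15
// and 10 + 15 < 60, so the damped result is 60 - 15 = 45 rather than dropping
// straight to 10. For an increase from 60 to 100, largest_delta =
// MAX2(60/4, 15U) = 15 and 100 > 60 + 15, so the damped result is 75.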
   439 unsigned int CMSStats::icms_update_duty_cycle_impl() {
   440   assert(CMSIncrementalPacing && valid(),
   441          "should be handled in icms_update_duty_cycle()");
   443   double cms_time_so_far = cms_timer().seconds();
   444   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
   445   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
   447   // Avoid division by 0.
   448   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
   449   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
   451   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
   452   if (new_duty_cycle > _icms_duty_cycle) {
   453     // Avoid very small duty cycles (1 or 2); 0 is allowed.
   454     if (new_duty_cycle > 2) {
   455       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
   456                                                 new_duty_cycle);
   457     }
   458   } else if (_allow_duty_cycle_reduction) {
   459     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
   460     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
   461     // Respect the minimum duty cycle.
   462     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
   463     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
   464   }
   466   if (PrintGCDetails || CMSTraceIncrementalPacing) {
   467     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
   468   }
   470   _allow_duty_cycle_reduction = false;
   471   return _icms_duty_cycle;
   472 }
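// Worked example (illustrative numbers only): with scaled_duration_remaining =
// 2.0s and time_until_full = 8.0s, duty_cycle_dbl = 100 * 2.0 / 8.0 = 25, so
// new_duty_cycle = 25. If that exceeds the current _icms_duty_cycle it is
// damped upward through icms_damped_duty_cycle(); otherwise a reduction is
// applied only when _allow_duty_cycle_reduction is set (once per cms cycle).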
   474 #ifndef PRODUCT
   475 void CMSStats::print_on(outputStream *st) const {
   476   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
   477   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
   478                gc0_duration(), gc0_period(), gc0_promoted());
   479   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
   480             cms_duration(), cms_duration_per_mb(),
   481             cms_period(), cms_allocated());
   482   st->print(",cms_since_beg=%g,cms_since_end=%g",
   483             cms_time_since_begin(), cms_time_since_end());
   484   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
   485             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
   486   if (CMSIncrementalMode) {
   487     st->print(",dc=%d", icms_duty_cycle());
   488   }
   490   if (valid()) {
   491     st->print(",promo_rate=%g,cms_alloc_rate=%g",
   492               promotion_rate(), cms_allocation_rate());
   493     st->print(",cms_consumption_rate=%g,time_until_full=%g",
   494               cms_consumption_rate(), time_until_cms_gen_full());
   495   }
   496   st->print(" ");
   497 }
   498 #endif // #ifndef PRODUCT
   500 CMSCollector::CollectorState CMSCollector::_collectorState =
   501                              CMSCollector::Idling;
   502 bool CMSCollector::_foregroundGCIsActive = false;
   503 bool CMSCollector::_foregroundGCShouldWait = false;
   505 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   506                            ConcurrentMarkSweepGeneration* permGen,
   507                            CardTableRS*                   ct,
   508                            ConcurrentMarkSweepPolicy*     cp):
   509   _cmsGen(cmsGen),
   510   _permGen(permGen),
   511   _ct(ct),
   512   _ref_processor(NULL),    // will be set later
   513   _conc_workers(NULL),     // may be set later
   514   _abort_preclean(false),
   515   _start_sampling(false),
   516   _between_prologue_and_epilogue(false),
   517   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
   518   _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
   519   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
   520                  -1 /* lock-free */, "No_lock" /* dummy */),
   521   _modUnionClosure(&_modUnionTable),
   522   _modUnionClosurePar(&_modUnionTable),
   523   _is_alive_closure(&_markBitMap),
   524   _restart_addr(NULL),
   525   _overflow_list(NULL),
   526   _preserved_oop_stack(NULL),
   527   _preserved_mark_stack(NULL),
   528   _stats(cmsGen),
   529   _eden_chunk_array(NULL),     // may be set in ctor body
   530   _eden_chunk_capacity(0),     // -- ditto --
   531   _eden_chunk_index(0),        // -- ditto --
   532   _survivor_plab_array(NULL),  // -- ditto --
   533   _survivor_chunk_array(NULL), // -- ditto --
   534   _survivor_chunk_capacity(0), // -- ditto --
   535   _survivor_chunk_index(0),    // -- ditto --
   536   _ser_pmc_preclean_ovflw(0),
   537   _ser_pmc_remark_ovflw(0),
   538   _par_pmc_remark_ovflw(0),
   539   _ser_kac_ovflw(0),
   540   _par_kac_ovflw(0),
   541 #ifndef PRODUCT
   542   _num_par_pushes(0),
   543 #endif
   544   _collection_count_start(0),
   545   _verifying(false),
   546   _icms_start_limit(NULL),
   547   _icms_stop_limit(NULL),
   548   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   549   _completed_initialization(false),
   550   _collector_policy(cp),
   551   _should_unload_classes(false),
   552   _concurrent_cycles_since_last_unload(0),
   553   _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
   554 {
   555   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
   556     ExplicitGCInvokesConcurrent = true;
   557   }
   558   // Now expand the span and allocate the collection support structures
   559   // (MUT, marking bit map etc.) to cover both generations subject to
   560   // collection.
   562   // First check that _permGen is adjacent to _cmsGen and above it.
   563   assert(   _cmsGen->reserved().word_size()  > 0
   564          && _permGen->reserved().word_size() > 0,
   565          "generations should not be of zero size");
   566   assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
   567          "_cmsGen and _permGen should not overlap");
   568   assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
   569          "_cmsGen->end() different from _permGen->start()");
   571   // For use by dirty card to oop closures.
   572   _cmsGen->cmsSpace()->set_collector(this);
   573   _permGen->cmsSpace()->set_collector(this);
   575   // Adjust my span to cover old (cms) gen and perm gen
   576   _span = _cmsGen->reserved()._union(_permGen->reserved());
   577   // Initialize the span of is_alive_closure
   578   _is_alive_closure.set_span(_span);
   580   // Allocate MUT and marking bit map
   581   {
   582     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
   583     if (!_markBitMap.allocate(_span)) {
   584       warning("Failed to allocate CMS Bit Map");
   585       return;
   586     }
   587     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
   588   }
   589   {
   590     _modUnionTable.allocate(_span);
   591     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
   592   }
   594   if (!_markStack.allocate(CMSMarkStackSize)) {
   595     warning("Failed to allocate CMS Marking Stack");
   596     return;
   597   }
   598   if (!_revisitStack.allocate(CMSRevisitStackSize)) {
   599     warning("Failed to allocate CMS Revisit Stack");
   600     return;
   601   }
   603   // Support for multi-threaded concurrent phases
   604   if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
   605     if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
   606       // just for now
   607       FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
   608     }
   609     if (ParallelCMSThreads > 1) {
   610       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
   611                                  ParallelCMSThreads, true);
   612       if (_conc_workers == NULL) {
   613         warning("GC/CMS: _conc_workers allocation failure: "
   614               "forcing -CMSConcurrentMTEnabled");
   615         CMSConcurrentMTEnabled = false;
   616       }
   617     } else {
   618       CMSConcurrentMTEnabled = false;
   619     }
   620   }
   621   if (!CMSConcurrentMTEnabled) {
   622     ParallelCMSThreads = 0;
   623   } else {
   624     // Turn off CMSCleanOnEnter optimization temporarily for
   625     // the MT case where it's not fixed yet; see 6178663.
   626     CMSCleanOnEnter = false;
   627   }
   628   assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
   629          "Inconsistency");
   631   // Parallel task queues; these are shared for the
   632   // concurrent and stop-world phases of CMS, but
   633   // are not shared with parallel scavenge (ParNew).
   634   {
   635     uint i;
   636     uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
   638     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
   639          || ParallelRefProcEnabled)
   640         && num_queues > 0) {
   641       _task_queues = new OopTaskQueueSet(num_queues);
   642       if (_task_queues == NULL) {
   643         warning("task_queues allocation failure.");
   644         return;
   645       }
   646       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
   647       if (_hash_seed == NULL) {
   648         warning("_hash_seed array allocation failure");
   649         return;
   650       }
   652       // XXX use a global constant instead of 64!
   653       typedef struct OopTaskQueuePadded {
   654         OopTaskQueue work_queue;
   655         char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
   656       } OopTaskQueuePadded;
   658       for (i = 0; i < num_queues; i++) {
   659         OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
   660         if (q_padded == NULL) {
   661           warning("work_queue allocation failure.");
   662           return;
   663         }
   664         _task_queues->register_queue(i, &q_padded->work_queue);
   665       }
   666       for (i = 0; i < num_queues; i++) {
   667         _task_queues->queue(i)->initialize();
   668         _hash_seed[i] = 17;  // copied from ParNew
   669       }
   670     }
   671   }
   673   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
   674   _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);
   676   // Clip CMSBootstrapOccupancy between 0 and 100.
   677   _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
   678                          /(double)100;
   680   _full_gcs_since_conc_gc = 0;
   682   // Now tell CMS generations the identity of their collector
   683   ConcurrentMarkSweepGeneration::set_collector(this);
   685   // Create & start a CMS thread for this CMS collector
   686   _cmsThread = ConcurrentMarkSweepThread::start(this);
   687   assert(cmsThread() != NULL, "CMS Thread should have been created");
   688   assert(cmsThread()->collector() == this,
   689          "CMS Thread should refer to this gen");
   690   assert(CGC_lock != NULL, "Where's the CGC_lock?");
   692   // Support for parallelizing young gen rescan
   693   GenCollectedHeap* gch = GenCollectedHeap::heap();
   694   _young_gen = gch->prev_gen(_cmsGen);
   695   if (gch->supports_inline_contig_alloc()) {
   696     _top_addr = gch->top_addr();
   697     _end_addr = gch->end_addr();
   698     assert(_young_gen != NULL, "no _young_gen");
   699     _eden_chunk_index = 0;
   700     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
   701     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
   702     if (_eden_chunk_array == NULL) {
   703       _eden_chunk_capacity = 0;
   704       warning("GC/CMS: _eden_chunk_array allocation failure");
   705     }
   706   }
   707   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
   709   // Support for parallelizing survivor space rescan
   710   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
   711     size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
   712     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
   713     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
   714     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
   715     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
   716         || _cursor == NULL) {
   717       warning("Failed to allocate survivor plab/chunk array");
   718       if (_survivor_plab_array  != NULL) {
   719         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
   720         _survivor_plab_array = NULL;
   721       }
   722       if (_survivor_chunk_array != NULL) {
   723         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
   724         _survivor_chunk_array = NULL;
   725       }
   726       if (_cursor != NULL) {
   727         FREE_C_HEAP_ARRAY(size_t, _cursor);
   728         _cursor = NULL;
   729       }
   730     } else {
   731       _survivor_chunk_capacity = 2*max_plab_samples;
   732       for (uint i = 0; i < ParallelGCThreads; i++) {
   733         HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
   734         if (vec == NULL) {
   735           warning("Failed to allocate survivor plab array");
   736           for (int j = i; j > 0; j--) {
   737             FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
   738           }
   739           FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
   740           FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
   741           _survivor_plab_array = NULL;
   742           _survivor_chunk_array = NULL;
   743           _survivor_chunk_capacity = 0;
   744           break;
   745         } else {
   746           ChunkArray* cur =
   747             ::new (&_survivor_plab_array[i]) ChunkArray(vec,
   748                                                         max_plab_samples);
   749           assert(cur->end() == 0, "Should be 0");
   750           assert(cur->array() == vec, "Should be vec");
   751           assert(cur->capacity() == max_plab_samples, "Error");
   752         }
   753       }
   754     }
   755   }
   756   assert(   (   _survivor_plab_array  != NULL
   757              && _survivor_chunk_array != NULL)
   758          || (   _survivor_chunk_capacity == 0
   759              && _survivor_chunk_index == 0),
   760          "Error");
   762   // Choose what strong roots should be scanned depending on verification options
   763   // and perm gen collection mode.
   764   if (!CMSClassUnloadingEnabled) {
   765     // If class unloading is disabled we want to include all classes into the root set.
   766     add_root_scanning_option(SharedHeap::SO_AllClasses);
   767   } else {
   768     add_root_scanning_option(SharedHeap::SO_SystemClasses);
   769   }
   771   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
   772   _gc_counters = new CollectorCounters("CMS", 1);
   773   _completed_initialization = true;
   774   _sweep_timer.start();  // start of time
   775 }
   777 const char* ConcurrentMarkSweepGeneration::name() const {
   778   return "concurrent mark-sweep generation";
   779 }
   780 void ConcurrentMarkSweepGeneration::update_counters() {
   781   if (UsePerfData) {
   782     _space_counters->update_all();
   783     _gen_counters->update_all();
   784   }
   785 }
   787 // this is an optimized version of update_counters(). it takes the
   788 // used value as a parameter rather than computing it.
   789 //
   790 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
   791   if (UsePerfData) {
   792     _space_counters->update_used(used);
   793     _space_counters->update_capacity();
   794     _gen_counters->update_all();
   795   }
   796 }
   798 void ConcurrentMarkSweepGeneration::print() const {
   799   Generation::print();
   800   cmsSpace()->print();
   801 }
   803 #ifndef PRODUCT
   804 void ConcurrentMarkSweepGeneration::print_statistics() {
   805   cmsSpace()->printFLCensus(0);
   806 }
   807 #endif
   809 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
   810   GenCollectedHeap* gch = GenCollectedHeap::heap();
   811   if (PrintGCDetails) {
   812     if (Verbose) {
   813       gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
   814         level(), short_name(), s, used(), capacity());
   815     } else {
   816       gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
   817         level(), short_name(), s, used() / K, capacity() / K);
   818     }
   819   }
   820   if (Verbose) {
   821     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
   822               gch->used(), gch->capacity());
   823   } else {
   824     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
   825               gch->used() / K, gch->capacity() / K);
   826   }
   827 }
   829 size_t
   830 ConcurrentMarkSweepGeneration::contiguous_available() const {
   831   // dld proposes an improvement in precision here. If the committed
   832   // part of the space ends in a free block we should add that to
   833   // uncommitted size in the calculation below. Will make this
   834   // change later, staying with the approximation below for the
   835   // time being. -- ysr.
   836   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
   837 }
   839 size_t
   840 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
   841   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
   842 }
   844 size_t ConcurrentMarkSweepGeneration::max_available() const {
   845   return free() + _virtual_space.uncommitted_size();
   846 }
   848 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
   849     size_t max_promotion_in_bytes,
   850     bool younger_handles_promotion_failure) const {
   852   // This is the most conservative test.  Full promotion is
   853   // guaranteed if this is used. The multiplicative factor is to
   854   // account for the worst case "dilatation".
   855   double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
   856   if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
   857     adjusted_max_promo_bytes = (double)max_uintx;
   858   }
   859   bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);
   861   if (younger_handles_promotion_failure && !result) {
   862     // Full promotion is not guaranteed because fragmentation
   863     // of the cms generation can prevent the full promotion.
   864     result = (max_available() >= (size_t)adjusted_max_promo_bytes);
   866     if (!result) {
   867       // With promotion failure handling the test for the ability
   868       // to support the promotion does not have to be guaranteed.
   869       // Use an average of the amount promoted.
   870       result = max_available() >= (size_t)
   871         gc_stats()->avg_promoted()->padded_average();
   872       if (PrintGC && Verbose && result) {
   873         gclog_or_tty->print_cr(
   874           "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
   875           " max_available: " SIZE_FORMAT
   876           " avg_promoted: " SIZE_FORMAT,
   877           max_available(), (size_t)
   878           gc_stats()->avg_promoted()->padded_average());
   879       }
   880     } else {
   881       if (PrintGC && Verbose) {
   882         gclog_or_tty->print_cr(
   883           "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
   884           " max_available: " SIZE_FORMAT
   885           " adj_max_promo_bytes: " SIZE_FORMAT,
   886           max_available(), (size_t)adjusted_max_promo_bytes);
   887       }
   888     }
   889   } else {
   890     if (PrintGC && Verbose) {
   891       gclog_or_tty->print_cr(
   892         "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
   893         " contiguous_available: " SIZE_FORMAT
   894         " adj_max_promo_bytes: " SIZE_FORMAT,
   895         max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
   896     }
   897   }
   898   return result;
   899 }
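// Worked example (illustrative numbers only): with _dilatation_factor = 2.0 and
// max_promotion_in_bytes = 10MB, adjusted_max_promo_bytes = 20MB. The promotion
// attempt is considered safe if max_contiguous_available() >= 20MB; failing
// that, and only if the younger generation handles promotion failure, the test
// falls back to max_available() >= 20MB and finally to max_available() >= the
// padded average of recent promotions.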
   901 CompactibleSpace*
   902 ConcurrentMarkSweepGeneration::first_compaction_space() const {
   903   return _cmsSpace;
   904 }
   906 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
   907   // Clear the promotion information.  These pointers can be adjusted
   908   // along with all the other pointers into the heap but
   909   // compaction is expected to be a rare event with
   910   // a heap using cms so don't do it without seeing the need.
   911   if (ParallelGCThreads > 0) {
   912     for (uint i = 0; i < ParallelGCThreads; i++) {
   913       _par_gc_thread_states[i]->promo.reset();
   914     }
   915   }
   916 }
   918 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
   919   blk->do_space(_cmsSpace);
   920 }
   922 void ConcurrentMarkSweepGeneration::compute_new_size() {
   923   assert_locked_or_safepoint(Heap_lock);
   925   // If incremental collection failed, we just want to expand
   926   // to the limit.
   927   if (incremental_collection_failed()) {
   928     clear_incremental_collection_failed();
   929     grow_to_reserved();
   930     return;
   931   }
   933   size_t expand_bytes = 0;
   934   double free_percentage = ((double) free()) / capacity();
   935   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
   936   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
   938   // compute expansion delta needed for reaching desired free percentage
   939   if (free_percentage < desired_free_percentage) {
   940     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
   941     assert(desired_capacity >= capacity(), "invalid expansion size");
   942     expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
   943   }
   944   if (expand_bytes > 0) {
   945     if (PrintGCDetails && Verbose) {
   946       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
   947       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
   948       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
   949       gclog_or_tty->print_cr("  Desired free fraction %f",
   950         desired_free_percentage);
   951       gclog_or_tty->print_cr("  Maximum free fraction %f",
   952         maximum_free_percentage);
   953       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
   954       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
   955         desired_capacity/1000);
   956       int prev_level = level() - 1;
   957       if (prev_level >= 0) {
   958         size_t prev_size = 0;
   959         GenCollectedHeap* gch = GenCollectedHeap::heap();
   960         Generation* prev_gen = gch->_gens[prev_level];
   961         prev_size = prev_gen->capacity();
   962           gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
   963                                  prev_size/1000);
   964       }
   965       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
   966         unsafe_max_alloc_nogc()/1000);
   967       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
   968         contiguous_available()/1000);
   969       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
   970         expand_bytes);
   971     }
   972     // safe if expansion fails
   973     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
   974     if (PrintGCDetails && Verbose) {
   975       gclog_or_tty->print_cr("  Expanded free fraction %f",
   976         ((double) free()) / capacity());
   977     }
   978   }
   979 }
   981 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
   982   return cmsSpace()->freelistLock();
   983 }
   985 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
   986                                                   bool   tlab) {
   987   CMSSynchronousYieldRequest yr;
   988   MutexLockerEx x(freelistLock(),
   989                   Mutex::_no_safepoint_check_flag);
   990   return have_lock_and_allocate(size, tlab);
   991 }
   993 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
   994                                                   bool   tlab) {
   995   assert_lock_strong(freelistLock());
   996   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
   997   HeapWord* res = cmsSpace()->allocate(adjustedSize);
   998   // Allocate the object live (grey) if the background collector has
   999   // started marking. This is necessary because the marker may
  1000   // have passed this address and consequently this object will
  1001   // not otherwise be greyed and would be incorrectly swept up.
  1002   // Note that if this object contains references, the writing
  1003   // of those references will dirty the card containing this object
  1004   // allowing the object to be blackened (and its references scanned)
  1005   // either during a preclean phase or at the final checkpoint.
  1006   if (res != NULL) {
  1007     collector()->direct_allocated(res, adjustedSize);
  1008     _direct_allocated_words += adjustedSize;
  1009     // allocation counters
  1010     NOT_PRODUCT(
  1011       _numObjectsAllocated++;
  1012       _numWordsAllocated += (int)adjustedSize;
  1013     )
  1014   }
  1015   return res;
  1016 }
  1018 // In the case of direct allocation by mutators in a generation that
  1019 // is being concurrently collected, the object must be allocated
  1020 // live (grey) if the background collector has started marking.
  1021 // This is necessary because the marker may
  1022 // have passed this address and consequently this object will
  1023 // not otherwise be greyed and would be incorrectly swept up.
  1024 // Note that if this object contains references, the writing
  1025 // of those references will dirty the card containing this object
  1026 // allowing the object to be blackened (and its references scanned)
  1027 // either during a preclean phase or at the final checkpoint.
  1028 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  1029   assert(_markBitMap.covers(start, size), "Out of bounds");
  1030   if (_collectorState >= Marking) {
  1031     MutexLockerEx y(_markBitMap.lock(),
  1032                     Mutex::_no_safepoint_check_flag);
  1033     // [see comments preceding SweepClosure::do_blk() below for details]
  1034     // 1. need to mark the object as live so it isn't collected
  1035     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
  1036     // 3. need to mark the end of the object so sweeper can skip over it
  1037     //    if it's uninitialized when the sweeper reaches it.
  1038     _markBitMap.mark(start);          // object is live
  1039     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
  1040     _markBitMap.mark(start + size - 1);
  1041                                       // mark end of object
  1042   }
  1043   // check that oop looks uninitialized
  1044   assert(oop(start)->klass() == NULL, "_klass should be NULL");
  1045 }
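// Worked example (illustrative numbers only): a direct allocation of 6 words at
// address "a" while marking is in progress sets the mark-bit-map bits for a
// (live), a + 1 (possibly uninitialized) and a + 5 (end of block), so the
// sweeper can skip the block even before its klass pointer is installed.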
  1047 void CMSCollector::promoted(bool par, HeapWord* start,
  1048                             bool is_obj_array, size_t obj_size) {
  1049   assert(_markBitMap.covers(start), "Out of bounds");
  1050   // See comment in direct_allocated() about when objects should
  1051   // be allocated live.
  1052   if (_collectorState >= Marking) {
  1053     // we already hold the marking bit map lock, taken in
  1054     // the prologue
  1055     if (par) {
  1056       _markBitMap.par_mark(start);
  1057     } else {
  1058       _markBitMap.mark(start);
  1059     }
  1060     // We don't need to mark the object as uninitialized (as
  1061     // in direct_allocated above) because this is being done with the
  1062     // world stopped and the object will be initialized by the
  1063     // time the sweeper gets to look at it.
  1064     assert(SafepointSynchronize::is_at_safepoint(),
  1065            "expect promotion only at safepoints");
  1067     if (_collectorState < Sweeping) {
  1068       // Mark the appropriate cards in the modUnionTable, so that
  1069       // this object gets scanned before the sweep. If this is
  1070       // not done, CMS generation references in the object might
  1071       // not get marked.
  1072       // For the case of arrays, which are otherwise precisely
  1073       // marked, we need to dirty the entire array, not just its head.
  1074       if (is_obj_array) {
  1075         // The [par_]mark_range() method expects mr.end() below to
  1076         // be aligned to the granularity of a bit's representation
  1077         // in the heap. In the case of the MUT below, that's a
  1078         // card size.
  1079         MemRegion mr(start,
  1080                      (HeapWord*)round_to((intptr_t)(start + obj_size),
  1081                         CardTableModRefBS::card_size /* bytes */));
  1082         if (par) {
  1083           _modUnionTable.par_mark_range(mr);
  1084         } else {
  1085           _modUnionTable.mark_range(mr);
  1086         }
  1087       } else {  // not an obj array; we can just mark the head
  1088         if (par) {
  1089           _modUnionTable.par_mark(start);
  1090         } else {
  1091           _modUnionTable.mark(start);
  1092         }
  1093       }
  1094     }
  1095   }
  1096 }
  1098 static inline size_t percent_of_space(Space* space, HeapWord* addr)
  1099 {
  1100   size_t delta = pointer_delta(addr, space->bottom());
  1101   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
  1102 }
  1104 void CMSCollector::icms_update_allocation_limits()
  1105 {
  1106   Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  1107   EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
  1109   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  1110   if (CMSTraceIncrementalPacing) {
  1111     stats().print();
  1112   }
  1114   assert(duty_cycle <= 100, "invalid duty cycle");
  1115   if (duty_cycle != 0) {
  1116     // The duty_cycle is a percentage between 0 and 100; convert to words and
  1117     // then compute the offset from the endpoints of the space.
  1118     size_t free_words = eden->free() / HeapWordSize;
  1119     double free_words_dbl = (double)free_words;
  1120     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
  1121     size_t offset_words = (free_words - duty_cycle_words) / 2;
  1123     _icms_start_limit = eden->top() + offset_words;
  1124     _icms_stop_limit = eden->end() - offset_words;
  1126     // The limits may be adjusted (shifted to the right) by
  1127     // CMSIncrementalOffset, to allow the application more mutator time after a
  1128     // young gen gc (when all mutators were stopped) and before CMS starts and
  1129     // takes away one or more cpus.
  1130     if (CMSIncrementalOffset != 0) {
  1131       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
  1132       size_t adjustment = (size_t)adjustment_dbl;
  1133       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
  1134       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
  1135         _icms_start_limit += adjustment;
  1136         _icms_stop_limit = tmp_stop;
  1137       }
  1138     }
  1139   }
  1140   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
  1141     _icms_start_limit = _icms_stop_limit = eden->end();
  1142   }
  1144   // Install the new start limit.
  1145   eden->set_soft_end(_icms_start_limit);
  1147   if (CMSTraceIncrementalMode) {
  1148     gclog_or_tty->print(" icms alloc limits:  "
  1149                            PTR_FORMAT "," PTR_FORMAT
  1150                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
  1151                            _icms_start_limit, _icms_stop_limit,
  1152                            percent_of_space(eden, _icms_start_limit),
  1153                            percent_of_space(eden, _icms_stop_limit));
  1154     if (Verbose) {
  1155       gclog_or_tty->print("eden:  ");
  1156       eden->print_on(gclog_or_tty);
  1157     }
  1158   }
  1159 }
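// Worked example (illustrative numbers only): with 1000 free words in eden and
// a duty cycle of 40%, duty_cycle_words = 400 and offset_words = (1000 - 400)/2
// = 300, so _icms_start_limit = top() + 300 and _icms_stop_limit = end() - 300;
// incremental CMS work then runs (roughly) while eden allocation is between the
// two limits.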
  1161 // Any changes here should try to maintain the invariant
  1162 // that if this method is called with _icms_start_limit
  1163 // and _icms_stop_limit both NULL, then it should return NULL
  1164 // and not notify the icms thread.
  1165 HeapWord*
  1166 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
  1167                                        size_t word_size)
  1168 {
  1169   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  1170   // nop.
  1171   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
  1172     if (top <= _icms_start_limit) {
  1173       if (CMSTraceIncrementalMode) {
  1174         space->print_on(gclog_or_tty);
  1175         gclog_or_tty->stamp();
  1176         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
  1177                                ", new limit=" PTR_FORMAT
  1178                                " (" SIZE_FORMAT "%%)",
  1179                                top, _icms_stop_limit,
  1180                                percent_of_space(space, _icms_stop_limit));
  1181       }
  1182       ConcurrentMarkSweepThread::start_icms();
  1183       assert(top < _icms_stop_limit, "Tautology");
  1184       if (word_size < pointer_delta(_icms_stop_limit, top)) {
  1185         return _icms_stop_limit;
  1186       }
  1188       // The allocation will cross both the _start and _stop limits, so do the
  1189       // stop notification also and return end().
  1190       if (CMSTraceIncrementalMode) {
  1191         space->print_on(gclog_or_tty);
  1192         gclog_or_tty->stamp();
  1193         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
  1194                                ", new limit=" PTR_FORMAT
  1195                                " (" SIZE_FORMAT "%%)",
  1196                                top, space->end(),
  1197                                percent_of_space(space, space->end()));
  1198       }
  1199       ConcurrentMarkSweepThread::stop_icms();
  1200       return space->end();
  1201     }
  1203     if (top <= _icms_stop_limit) {
  1204       if (CMSTraceIncrementalMode) {
  1205         space->print_on(gclog_or_tty);
  1206         gclog_or_tty->stamp();
  1207         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
  1208                                ", new limit=" PTR_FORMAT
  1209                                " (" SIZE_FORMAT "%%)",
  1210                                top, space->end(),
  1211                                percent_of_space(space, space->end()));
  1212       }
  1213       ConcurrentMarkSweepThread::stop_icms();
  1214       return space->end();
  1215     }
  1217     if (CMSTraceIncrementalMode) {
  1218       space->print_on(gclog_or_tty);
  1219       gclog_or_tty->stamp();
  1220       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
  1221                              ", new limit=" PTR_FORMAT,
  1222                              top, NULL);
  1223     }
  1224   }
  1226   return NULL;
  1227 }
  1229 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  1230   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1231   // allocate, copy and if necessary update promoinfo --
  1232   // delegate to underlying space.
  1233   assert_lock_strong(freelistLock());
  1235 #ifndef PRODUCT
  1236   if (Universe::heap()->promotion_should_fail()) {
  1237     return NULL;
  1238   }
  1239 #endif  // #ifndef PRODUCT
  1241   oop res = _cmsSpace->promote(obj, obj_size);
  1242   if (res == NULL) {
  1243     // expand and retry
  1244     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
  1245     expand(s*HeapWordSize, MinHeapDeltaBytes,
  1246       CMSExpansionCause::_satisfy_promotion);
  1247     // Since there's currently no next generation, we don't try to promote
  1248     // into a more senior generation.
  1249     assert(next_gen() == NULL, "assumption, based upon which no attempt "
  1250                                "is made to pass on a possibly failing "
  1251                                "promotion to next generation");
  1252     res = _cmsSpace->promote(obj, obj_size);
  1253   }
  1254   if (res != NULL) {
  1255     // See comment in allocate() about when objects should
  1256     // be allocated live.
  1257     assert(obj->is_oop(), "Will dereference klass pointer below");
  1258     collector()->promoted(false,           // Not parallel
  1259                           (HeapWord*)res, obj->is_objArray(), obj_size);
  1260     // promotion counters
  1261     NOT_PRODUCT(
  1262       _numObjectsPromoted++;
  1263       _numWordsPromoted +=
  1264         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
  1267   return res;
  1271 HeapWord*
  1272 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
  1273                                              HeapWord* top,
  1274                                              size_t word_sz)
  1276   return collector()->allocation_limit_reached(space, top, word_sz);
  1279 // Things to support parallel young-gen collection.
  1280 oop
  1281 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
  1282                                            oop old, markOop m,
  1283                                            size_t word_sz) {
  1284 #ifndef PRODUCT
  1285   if (Universe::heap()->promotion_should_fail()) {
  1286     return NULL;
  1288 #endif  // #ifndef PRODUCT
  1290   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  1291   PromotionInfo* promoInfo = &ps->promo;
  1292   // if we are tracking promotions, then first ensure space for
  1293   // promotion (including spooling space for saving the header if necessary).
  1294   // then allocate and copy, then track promoted info if needed.
  1295   // When tracking (see PromotionInfo::track()), the mark word may
  1296   // be displaced and in this case restoration of the mark word
  1297   // occurs in the (oop_since_save_marks_)iterate phase.
  1298   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
  1299     // Out of space for allocating spooling buffers;
  1300     // try expanding and allocating spooling buffers.
  1301     if (!expand_and_ensure_spooling_space(promoInfo)) {
  1302       return NULL;
  1305   assert(promoInfo->has_spooling_space(), "Control point invariant");
  1306   HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  1307   if (obj_ptr == NULL) {
  1308      obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
  1309      if (obj_ptr == NULL) {
  1310        return NULL;
  1313   oop obj = oop(obj_ptr);
  1314   assert(obj->klass() == NULL, "Object should be uninitialized here.");
  1315   // Otherwise, copy the object.  Here we must be careful to insert the
  1316   // klass pointer last, since this marks the block as an allocated object.
  1317   HeapWord* old_ptr = (HeapWord*)old;
  1318   if (word_sz > (size_t)oopDesc::header_size()) {
  1319     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
  1320                                  obj_ptr + oopDesc::header_size(),
  1321                                  word_sz - oopDesc::header_size());
  1323   // Restore the mark word copied above.
  1324   obj->set_mark(m);
  1325   // Now we can track the promoted object, if necessary.  We take care
  1326   // to delay the transition from uninitialized to full object
  1327   // (i.e., insertion of the klass pointer) until after tracking, so that
  1328   // the object atomically becomes a tracked, promoted object.
  1329   if (promoInfo->tracking()) {
  1330     promoInfo->track((PromotedObject*)obj, old->klass());
  1332   // Finally, install the klass pointer.
  1333   obj->set_klass(old->klass());
  1335   assert(old->is_oop(), "Will dereference klass ptr below");
  1336   collector()->promoted(true,          // parallel
  1337                         obj_ptr, old->is_objArray(), word_sz);
  1339   NOT_PRODUCT(
  1340     Atomic::inc(&_numObjectsPromoted);
  1341     Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
  1342                 &_numWordsPromoted);
  1345   return obj;
  1348 void
  1349 ConcurrentMarkSweepGeneration::
  1350 par_promote_alloc_undo(int thread_num,
  1351                        HeapWord* obj, size_t word_sz) {
  1352   // CMS does not support promotion undo.
  1353   ShouldNotReachHere();
  1356 void
  1357 ConcurrentMarkSweepGeneration::
  1358 par_promote_alloc_done(int thread_num) {
  1359   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  1360   ps->lab.retire();
  1361 #if CFLS_LAB_REFILL_STATS
  1362   if (thread_num == 0) {
  1363     _cmsSpace->print_par_alloc_stats();
  1365 #endif
  1368 void
  1369 ConcurrentMarkSweepGeneration::
  1370 par_oop_since_save_marks_iterate_done(int thread_num) {
  1371   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  1372   ParScanWithoutBarrierClosure* dummy_cl = NULL;
  1373   ps->promo.promoted_oops_iterate_nv(dummy_cl);
  1376 // XXXPERM
  1377 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
  1378                                                    size_t size,
  1379                                                    bool   tlab)
  1381   // We allow a STW collection only if a full
  1382   // collection was requested.
  1383   return full || should_allocate(size, tlab); // FIX ME !!!
  1384   // This and promotion failure handling are connected at the
  1385   // hip and should be fixed by untying them.
  1388 bool CMSCollector::shouldConcurrentCollect() {
  1389   if (_full_gc_requested) {
  1390     assert(ExplicitGCInvokesConcurrent, "Unexpected state");
  1391     if (Verbose && PrintGCDetails) {
  1392       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
  1393                              "gc request");
  1395     return true;
  1398   // For debugging purposes, change the type of collection.
  1399   // If the rotation is not on the concurrent collection
  1400   // type, don't start a concurrent collection.
  1401   NOT_PRODUCT(
  1402     if (RotateCMSCollectionTypes &&
  1403         (_cmsGen->debug_collection_type() !=
  1404           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
  1405       assert(_cmsGen->debug_collection_type() !=
  1406         ConcurrentMarkSweepGeneration::Unknown_collection_type,
  1407         "Bad cms collection type");
  1408       return false;
  1412   FreelistLocker x(this);
  1413   // ------------------------------------------------------------------
  1414   // Print out lots of information which affects the initiation of
  1415   // a collection.
  1416   if (PrintCMSInitiationStatistics && stats().valid()) {
  1417     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
  1418     gclog_or_tty->stamp();
  1419     gclog_or_tty->print_cr("");
  1420     stats().print_on(gclog_or_tty);
  1421     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
  1422       stats().time_until_cms_gen_full());
  1423     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
  1424     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
  1425                            _cmsGen->contiguous_available());
  1426     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
  1427     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
  1428     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
  1429     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
  1430     gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
  1432   // ------------------------------------------------------------------
  1434   // If the estimated time to complete a cms collection (cms_duration())
  1435   // is less than the estimated time remaining until the cms generation
  1436   // is full, start a collection.
  1437   if (!UseCMSInitiatingOccupancyOnly) {
  1438     if (stats().valid()) {
  1439       if (stats().time_until_cms_start() == 0.0) {
  1440         return true;
  1442     } else {
  1443       // We want to conservatively collect somewhat early in order
  1444       // to try to "bootstrap" our CMS/promotion statistics;
  1445       // this branch will not fire after the first successful CMS
  1446       // collection because the stats should then be valid.
  1447       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
  1448         if (Verbose && PrintGCDetails) {
  1449           gclog_or_tty->print_cr(
  1450             " CMSCollector: collect for bootstrapping statistics:"
  1451             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
  1452             _bootstrap_occupancy);
  1454         return true;
  1459   // Otherwise, we start a collection cycle if either the perm gen or
  1460   // old gen want a collection cycle started. Each may use
  1461   // an appropriate criterion for making this decision.
  1462   // XXX We need to make sure that the gen expansion
  1463   // criterion dovetails well with this. XXX NEED TO FIX THIS
  1464   if (_cmsGen->should_concurrent_collect()) {
  1465     if (Verbose && PrintGCDetails) {
  1466       gclog_or_tty->print_cr("CMS old gen initiated");
  1468     return true;
  1471   // We start a collection if we believe an incremental collection may fail;
  1472   // this is not likely to be productive in practice because it's probably too
  1473   // late anyway.
  1474   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1475   assert(gch->collector_policy()->is_two_generation_policy(),
  1476          "You may want to check the correctness of the following");
  1477   if (gch->incremental_collection_will_fail()) {
  1478     if (PrintGCDetails && Verbose) {
  1479       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
  1481     return true;
  1484   if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
  1485     bool res = update_should_unload_classes();
  1486     if (res) {
  1487       if (Verbose && PrintGCDetails) {
  1488         gclog_or_tty->print_cr("CMS perm gen initiated");
  1490       return true;
  1493   return false;
  1496 // Clear _expansion_cause fields of constituent generations
  1497 void CMSCollector::clear_expansion_cause() {
  1498   _cmsGen->clear_expansion_cause();
  1499   _permGen->clear_expansion_cause();
  1502 // We should be conservative in starting a collection cycle.  To
  1503 // start too eagerly runs the risk of collecting too often in the
  1504 // extreme.  To collect too rarely falls back on full collections,
  1505 // which works, even if not optimum in terms of concurrent work.
  1506 // As a workaround for collecting too eagerly, use the flag
  1507 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
  1508 // giving the user an easily understandable way of controlling the
  1509 // collections.
  1510 // We want to start a new collection cycle if any of the following
  1511 // conditions hold:
  1512 // . our current occupancy exceeds the configured initiating occupancy
  1513 //   for this generation, or
  1514 // . we recently needed to expand this space and have not, since that
  1515 //   expansion, done a collection of this generation, or
  1516 // . the underlying space believes that it may be a good idea to initiate
  1517 //   a concurrent collection (this may be based on criteria such as the
  1518 //   following: the space uses linear allocation and linear allocation is
  1519 //   going to fail, or there is believed to be excessive fragmentation in
  1520 //   the generation, etc... or ...
  1521 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
  1522 //   the case of the old generation, not the perm generation; see CR 6543076):
  1523 //   we may be approaching a point at which allocation requests may fail because
  1524 //   we will be out of sufficient free space given allocation rate estimates.]
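// Illustration only (not part of the original source): the initiation policy
// sketched above is driven by ordinary command-line flags.  Two common ways
// of pinning it down (values are examples, not recommendations):
//
//   // Start a cycle purely on the configured occupancy threshold, ignoring
//   // the statistical estimates used in CMSCollector::shouldConcurrentCollect():
//   java -XX:+UseConcMarkSweepGC \
//        -XX:CMSInitiatingOccupancyFraction=70 \
//        -XX:+UseCMSInitiatingOccupancyOnly ...
//
//   // Turn explicit System.gc() requests into concurrent cycles
//   // (the _full_gc_requested path tested earlier in shouldConcurrentCollect()):
//   java -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent ...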
  1525 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
  1527   assert_lock_strong(freelistLock());
  1528   if (occupancy() > initiating_occupancy()) {
  1529     if (PrintGCDetails && Verbose) {
  1530       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
  1531         short_name(), occupancy(), initiating_occupancy());
  1533     return true;
  1535   if (UseCMSInitiatingOccupancyOnly) {
  1536     return false;
  1538   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
  1539     if (PrintGCDetails && Verbose) {
  1540       gclog_or_tty->print(" %s: collect because expanded for allocation ",
  1541         short_name());
  1543     return true;
  1545   if (_cmsSpace->should_concurrent_collect()) {
  1546     if (PrintGCDetails && Verbose) {
  1547       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
  1548         short_name());
  1550     return true;
  1552   return false;
  1555 void ConcurrentMarkSweepGeneration::collect(bool   full,
  1556                                             bool   clear_all_soft_refs,
  1557                                             size_t size,
  1558                                             bool   tlab)
  1560   collector()->collect(full, clear_all_soft_refs, size, tlab);
  1563 void CMSCollector::collect(bool   full,
  1564                            bool   clear_all_soft_refs,
  1565                            size_t size,
  1566                            bool   tlab)
  1568   if (!UseCMSCollectionPassing && _collectorState > Idling) {
  1569     // For debugging purposes skip the collection if the state
  1570     // is not currently idle
  1571     if (TraceCMSState) {
  1572       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
  1573         Thread::current(), full, _collectorState);
  1575     return;
  1578   // The following "if" branch is present for defensive reasons.
  1579   // In the current uses of this interface, it can be replaced with:
  1580   // assert(!GC_locker.is_active(), "Can't be called otherwise");
  1581   // But I am not placing that assert here to allow future
  1582   // generality in invoking this interface.
  1583   if (GC_locker::is_active()) {
  1584     // A consistency test for GC_locker
  1585     assert(GC_locker::needs_gc(), "Should have been set already");
  1586     // Skip this foreground collection, instead
  1587     // expanding the heap if necessary.
  1588     // Need the free list locks for the call to free() in compute_new_size()
  1589     compute_new_size();
  1590     return;
  1592   acquire_control_and_collect(full, clear_all_soft_refs);
  1593   _full_gcs_since_conc_gc++;
  1597 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
  1598   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1599   unsigned int gc_count = gch->total_full_collections();
  1600   if (gc_count == full_gc_count) {
  1601     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
  1602     _full_gc_requested = true;
  1603     CGC_lock->notify();   // nudge CMS thread
  1608 // The foreground and background collectors need to coordinate in order
  1609 // to make sure that they do not mutually interfere with CMS collections.
  1610 // When a background collection is active,
  1611 // the foreground collector may need to take over (preempt) and
  1612 // synchronously complete an ongoing collection. Depending on the
  1613 // frequency of the background collections and the heap usage
  1614 // of the application, this preemption can be seldom or frequent.
  1615 // There are only certain
  1616 // points in the background collection at which the "collection-baton"
  1617 // can be passed to the foreground collector.
  1618 //
  1619 // The foreground collector will wait for the baton before
  1620 // starting any part of the collection.  The foreground collector
  1621 // will only wait at one location.
  1622 //
  1623 // The background collector will yield the baton before starting a new
  1624 // phase of the collection (e.g., before initial marking, marking from roots,
  1625 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
  1626 // of the loop which switches the phases. The background collector does some
  1627 // of the phases (initial mark, final re-mark) with the world stopped.
  1628 // Because of locking involved in stopping the world,
  1629 // the foreground collector should not block waiting for the background
  1630 // collector when it is doing a stop-the-world phase.  The background
  1631 // collector will yield the baton at an additional point just before
  1632 // it enters a stop-the-world phase.  Once the world is stopped, the
  1633 // background collector checks the phase of the collection.  If the
  1634 // phase has not changed, it proceeds with the collection.  If the
  1635 // phase has changed, it skips that phase of the collection.  See
  1636 // the comments on the use of the Heap_lock in collect_in_background().
  1637 //
  1638 // Variable used in baton passing.
  1639 //   _foregroundGCIsActive - Set to true by the foreground collector when
  1640 //      it wants the baton.  The foreground clears it when it has finished
  1641 //      the collection.
  1642 //   _foregroundGCShouldWait - Set to true by the background collector
  1643 //        when it is running.  The foreground collector waits while
  1644 //      _foregroundGCShouldWait is true.
  1645 //  CGC_lock - monitor used to protect access to the above variables
  1646 //      and to notify the foreground and background collectors.
  1647 //  _collectorState - current state of the CMS collection.
  1648 //
  1649 // The foreground collector
  1650 //   acquires the CGC_lock
  1651 //   sets _foregroundGCIsActive
  1652 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
  1653 //     various locks acquired in preparation for the collection
  1654 //     are released so as not to block the background collector
  1655 //     that is in the midst of a collection
  1656 //   proceeds with the collection
  1657 //   clears _foregroundGCIsActive
  1658 //   returns
  1659 //
  1660 // The background collector in a loop iterating on the phases of the
  1661 //      collection
  1662 //   acquires the CGC_lock
  1663 //   sets _foregroundGCShouldWait
  1664 //   if _foregroundGCIsActive is set
  1665 //     clears _foregroundGCShouldWait, notifies CGC_lock
  1666 //     waits on CGC_lock for _foregroundGCIsActive to become false
  1667 //     and exits the loop.
  1668 //   otherwise
  1669 //     proceed with that phase of the collection
  1670 //     if the phase is a stop-the-world phase,
  1671 //       yield the baton once more just before enqueueing
  1672 //       the stop-world CMS operation (executed by the VM thread).
  1673 //   returns after all phases of the collection are done
  1674 //
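// Illustrative sketch only (not HotSpot code): the same handshake expressed
// with standard C++ primitives, kept in a comment so it does not affect this
// file.  All names below are hypothetical; the real implementation uses the
// CGC_lock monitor, the CMS token, and the two flags described above.
//
//   #include <mutex>
//   #include <condition_variable>
//
//   std::mutex              cgc_lock;
//   std::condition_variable cgc_cv;
//   bool fg_is_active   = false;   // plays the role of _foregroundGCIsActive
//   bool fg_should_wait = false;   // plays the role of _foregroundGCShouldWait
//
//   void foreground_collect() {                    // VM thread
//     std::unique_lock<std::mutex> l(cgc_lock);
//     fg_is_active = true;                         // ask for the baton
//     cgc_cv.notify_all();                         // wake a blocked CMS thread
//     cgc_cv.wait(l, [] { return !fg_should_wait; });
//     // ... do the foreground collection ...
//     fg_is_active = false;                        // give the baton back
//     cgc_cv.notify_all();
//   }
//
//   void background_phase_boundary() {             // CMS thread, per phase
//     std::unique_lock<std::mutex> l(cgc_lock);
//     fg_should_wait = true;                       // normally FG must wait
//     if (fg_is_active) {
//       fg_should_wait = false;                    // yield the baton instead
//       cgc_cv.notify_all();
//       cgc_cv.wait(l, [] { return !fg_is_active; });
//     }
//   }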
  1676 void CMSCollector::acquire_control_and_collect(bool full,
  1677         bool clear_all_soft_refs) {
  1678   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  1679   assert(!Thread::current()->is_ConcurrentGC_thread(),
  1680          "shouldn't try to acquire control from self!");
  1682   // Start the protocol for acquiring control of the
  1683   // collection from the background collector (aka CMS thread).
  1684   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  1685          "VM thread should have CMS token");
  1686   // Remember the possibly interrupted state of an ongoing
  1687   // concurrent collection
  1688   CollectorState first_state = _collectorState;
  1690   // Signal to a possibly ongoing concurrent collection that
  1691   // we want to do a foreground collection.
  1692   _foregroundGCIsActive = true;
  1694   // Disable incremental mode during a foreground collection.
  1695   ICMSDisabler icms_disabler;
  1697   // release locks and wait for a notify from the background collector
  1698   // releasing the locks is only necessary for phases which
  1699   // yield to improve the granularity of the collection.
  1700   assert_lock_strong(bitMapLock());
  1701   // We need to lock the Free list lock for the space that we are
  1702   // currently collecting.
  1703   assert(haveFreelistLocks(), "Must be holding free list locks");
  1704   bitMapLock()->unlock();
  1705   releaseFreelistLocks();
  1707     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  1708     if (_foregroundGCShouldWait) {
  1709       // We are going to be waiting for action for the CMS thread;
  1710       // it had better not be gone (for instance at shutdown)!
  1711       assert(ConcurrentMarkSweepThread::cmst() != NULL,
  1712              "CMS thread must be running");
  1713       // Wait here until the background collector gives us the go-ahead
  1714       ConcurrentMarkSweepThread::clear_CMS_flag(
  1715         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
  1716       // Get a possibly blocked CMS thread going:
  1717       //   Note that we set _foregroundGCIsActive true above,
  1718       //   without protection of the CGC_lock.
  1719       CGC_lock->notify();
  1720       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
  1721              "Possible deadlock");
  1722       while (_foregroundGCShouldWait) {
  1723         // wait for notification
  1724         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  1725         // Possibility of delay/starvation here, since CMS token does
  1726         // not know to give priority to VM thread? Actually, I think
  1727         // there wouldn't be any delay/starvation, but the proof of
  1728         // that "fact" (?) appears non-trivial. XXX 20011219YSR
  1730       ConcurrentMarkSweepThread::set_CMS_flag(
  1731         ConcurrentMarkSweepThread::CMS_vm_has_token);
  1734   // The CMS_token is already held.  Get back the other locks.
  1735   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  1736          "VM thread should have CMS token");
  1737   getFreelistLocks();
  1738   bitMapLock()->lock_without_safepoint_check();
  1739   if (TraceCMSState) {
  1740     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
  1741       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
  1742     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
  1745   // Check if we need to do a compaction, or if not, whether
  1746   // we need to start the mark-sweep from scratch.
  1747   bool should_compact    = false;
  1748   bool should_start_over = false;
  1749   decide_foreground_collection_type(clear_all_soft_refs,
  1750     &should_compact, &should_start_over);
  1752 NOT_PRODUCT(
  1753   if (RotateCMSCollectionTypes) {
  1754     if (_cmsGen->debug_collection_type() ==
  1755         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
  1756       should_compact = true;
  1757     } else if (_cmsGen->debug_collection_type() ==
  1758                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
  1759       should_compact = false;
  1764   if (PrintGCDetails && first_state > Idling) {
  1765     GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
  1766     if (GCCause::is_user_requested_gc(cause) ||
  1767         GCCause::is_serviceability_requested_gc(cause)) {
  1768       gclog_or_tty->print(" (concurrent mode interrupted)");
  1769     } else {
  1770       gclog_or_tty->print(" (concurrent mode failure)");
  1774   if (should_compact) {
  1775     // If the collection is being acquired from the background
  1776     // collector, there may be references on the discovered
  1777     // references lists that have NULL referents (being those
  1778     // that were concurrently cleared by a mutator) or
  1779     // that are no longer active (having been enqueued concurrently
  1780     // by the mutator).
  1781     // Scrub the list of those references because Mark-Sweep-Compact
  1782     // code assumes referents are not NULL and that all discovered
  1783     // Reference objects are active.
  1784     ref_processor()->clean_up_discovered_references();
  1786     do_compaction_work(clear_all_soft_refs);
  1788     // Has the GC time limit been exceeded?
  1789     check_gc_time_limit();
  1791   } else {
  1792     do_mark_sweep_work(clear_all_soft_refs, first_state,
  1793       should_start_over);
  1795   // Reset the expansion cause, now that we just completed
  1796   // a collection cycle.
  1797   clear_expansion_cause();
  1798   _foregroundGCIsActive = false;
  1799   return;
  1802 void CMSCollector::check_gc_time_limit() {
  1804   // Ignore explicit GC's.  Exiting here does not set the flag and
  1805   // does not reset the count.  Updating of the averages for system
  1806   // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
  1807   GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
  1808   if (GCCause::is_user_requested_gc(gc_cause) ||
  1809       GCCause::is_serviceability_requested_gc(gc_cause)) {
  1810     return;
  1813   // Calculate the fraction of the CMS generation that was freed during
  1814   // the last collection.
  1815   // Only consider the STW compacting cost for now.
  1816   //
  1817   // Note that the gc time limit test only works for the collections
  1818   // of the young gen + tenured gen and not for collections of the
  1819   // permanent gen.  That is because the calculation of the space
  1820   // freed by the collection is the free space in the young gen +
  1821   // tenured gen.
  1823   double fraction_free =
  1824     ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
  1825   if ((100.0 * size_policy()->compacting_gc_cost()) >
  1826          ((double) GCTimeLimit) &&
  1827         ((fraction_free * 100) < GCHeapFreeLimit)) {
  1828     size_policy()->inc_gc_time_limit_count();
  1829     if (UseGCOverheadLimit &&
  1830         (size_policy()->gc_time_limit_count() >
  1831          AdaptiveSizePolicyGCTimeLimitThreshold)) {
  1832       size_policy()->set_gc_time_limit_exceeded(true);
  1833       // Avoid consecutive OOM due to the gc time limit by resetting
  1834       // the counter.
  1835       size_policy()->reset_gc_time_limit_count();
  1836       if (PrintGCDetails) {
  1837         gclog_or_tty->print_cr("      GC is exceeding overhead limit "
  1838           "of %d%%", GCTimeLimit);
  1840     } else {
  1841       if (PrintGCDetails) {
  1842         gclog_or_tty->print_cr("      GC would exceed overhead limit "
  1843           "of %d%%", GCTimeLimit);
  1846   } else {
  1847     size_policy()->reset_gc_time_limit_count();
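// Worked example (illustrative, using the default limits GCTimeLimit=98 and
// GCHeapFreeLimit=2): if recent compacting collections account for 99% of
// elapsed time (compacting_gc_cost() == 0.99, so 100.0 * 0.99 = 99 > 98) and
// only 1.5% of the CMS generation is free (fraction_free == 0.015, so
// 0.015 * 100 = 1.5 < 2), the count above is incremented; once it exceeds
// AdaptiveSizePolicyGCTimeLimitThreshold (and UseGCOverheadLimit is set),
// set_gc_time_limit_exceeded(true) marks the overhead limit as exceeded.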
  1851 // Resize the perm generation and the tenured generation
  1852 // after obtaining the free list locks for the
  1853 // two generations.
  1854 void CMSCollector::compute_new_size() {
  1855   assert_locked_or_safepoint(Heap_lock);
  1856   FreelistLocker z(this);
  1857   _permGen->compute_new_size();
  1858   _cmsGen->compute_new_size();
  1861 // A work method used by foreground collection to determine
  1862 // what type of collection (compacting or not, continuing or fresh)
  1863 // it should do.
  1864 // NOTE: the intent is to make UseCMSCompactAtFullCollection
  1865 // and CMSCompactWhenClearAllSoftRefs the default in the future
  1866 // and do away with the flags after a suitable period.
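// Illustration only (not part of the original source): the compaction policy
// decided below is controlled by ordinary command-line options, for example
// (values are examples only):
//
//   // Compact on every full (foreground) collection:
//   java -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection \
//        -XX:CMSFullGCsBeforeCompaction=0 ...
//
//   // Or compact only after several non-compacting full collections:
//   java -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection \
//        -XX:CMSFullGCsBeforeCompaction=4 ...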
  1867 void CMSCollector::decide_foreground_collection_type(
  1868   bool clear_all_soft_refs, bool* should_compact,
  1869   bool* should_start_over) {
  1870   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
  1871   // flag is set, and we have either requested a System.gc() or
  1872   // the number of full gc's since the last concurrent cycle
  1873   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
  1874   // or if an incremental collection has failed
  1875   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1876   assert(gch->collector_policy()->is_two_generation_policy(),
  1877          "You may want to check the correctness of the following");
  1878   // Inform cms gen if this was due to partial collection failing.
  1879   // The CMS gen may use this fact to determine its expansion policy.
  1880   if (gch->incremental_collection_will_fail()) {
  1881     assert(!_cmsGen->incremental_collection_failed(),
  1882            "Should have been noticed, reacted to and cleared");
  1883     _cmsGen->set_incremental_collection_failed();
  1885   *should_compact =
  1886     UseCMSCompactAtFullCollection &&
  1887     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
  1888      GCCause::is_user_requested_gc(gch->gc_cause()) ||
  1889      gch->incremental_collection_will_fail());
  1890   *should_start_over = false;
  1891   if (clear_all_soft_refs && !*should_compact) {
  1892     // We are about to do a last ditch collection attempt
  1893     // so it would normally make sense to do a compaction
  1894     // to reclaim as much space as possible.
  1895     if (CMSCompactWhenClearAllSoftRefs) {
  1896       // Default: The rationale is that in this case either
  1897       // we are past the final marking phase, in which case
  1898       // we'd have to start over, or so little has been done
  1899       // that there's little point in saving that work. Compaction
  1900       // appears to be the sensible choice in either case.
  1901       *should_compact = true;
  1902     } else {
  1903       // We have been asked to clear all soft refs, but not to
  1904       // compact. Make sure that we aren't past the final checkpoint
  1905       // phase, for that is where we process soft refs. If we are already
  1906       // past that phase, we'll need to redo the refs discovery phase and
  1907       // if necessary clear soft refs that weren't previously
  1908       // cleared. We do so by remembering the phase in which
  1909       // we came in, and if we are past the refs processing
  1910       // phase, we'll choose to just redo the mark-sweep
  1911       // collection from scratch.
  1912       if (_collectorState > FinalMarking) {
  1913         // We are past the refs processing phase;
  1914         // start over and do a fresh synchronous CMS cycle
  1915         _collectorState = Resetting; // skip to reset to start new cycle
  1916         reset(false /* == !asynch */);
  1917         *should_start_over = true;
  1918       } // else we can continue a possibly ongoing current cycle
  1923 // A work method used by the foreground collector to do
  1924 // a mark-sweep-compact.
  1925 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  1926   GenCollectedHeap* gch = GenCollectedHeap::heap();
  1927   TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
  1928   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
  1929     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
  1930       "collections passed to foreground collector", _full_gcs_since_conc_gc);
  1933   // Sample collection interval time and reset for collection pause.
  1934   if (UseAdaptiveSizePolicy) {
  1935     size_policy()->msc_collection_begin();
  1938   // Temporarily widen the span of the weak reference processing to
  1939   // the entire heap.
  1940   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  1941   ReferenceProcessorSpanMutator x(ref_processor(), new_span);
  1943   // Temporarily, clear the "is_alive_non_header" field of the
  1944   // reference processor.
  1945   ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
  1947   // Temporarily make reference _processing_ single threaded (non-MT).
  1948   ReferenceProcessorMTProcMutator z(ref_processor(), false);
  1950   // Temporarily make refs discovery atomic
  1951   ReferenceProcessorAtomicMutator w(ref_processor(), true);
  1953   ref_processor()->set_enqueuing_is_done(false);
  1954   ref_processor()->enable_discovery();
  1955   // If an asynchronous collection finishes, the _modUnionTable is
  1956   // all clear.  If we are taking over the collection from an asynchronous
  1957   // collection, clear the _modUnionTable.
  1958   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
  1959     "_modUnionTable should be clear if the baton was not passed");
  1960   _modUnionTable.clear_all();
  1962   // We must adjust the allocation statistics being maintained
  1963   // in the free list space. We do so by reading and clearing
  1964   // the sweep timer and updating the block flux rate estimates below.
  1965   assert(_sweep_timer.is_active(), "We should never see the timer inactive");
  1966   _sweep_timer.stop();
  1967   // Note that we do not use this sample to update the _sweep_estimate.
  1968   _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
  1969                                           _sweep_estimate.padded_average());
  1971   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
  1972     ref_processor(), clear_all_soft_refs);
  1973   #ifdef ASSERT
  1974     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
  1975     size_t free_size = cms_space->free();
  1976     assert(free_size ==
  1977            pointer_delta(cms_space->end(), cms_space->compaction_top())
  1978            * HeapWordSize,
  1979       "All the free space should be compacted into one chunk at top");
  1980     assert(cms_space->dictionary()->totalChunkSize(
  1981                                       debug_only(cms_space->freelistLock())) == 0 ||
  1982            cms_space->totalSizeInIndexedFreeLists() == 0,
  1983       "All the free space should be in a single chunk");
  1984     size_t num = cms_space->totalCount();
  1985     assert((free_size == 0 && num == 0) ||
  1986            (free_size > 0  && (num == 1 || num == 2)),
  1987          "There should be at most 2 free chunks after compaction");
  1988   #endif // ASSERT
  1989   _collectorState = Resetting;
  1990   assert(_restart_addr == NULL,
  1991          "Should have been NULL'd before baton was passed");
  1992   reset(false /* == !asynch */);
  1993   _cmsGen->reset_after_compaction();
  1994   _concurrent_cycles_since_last_unload = 0;
  1996   if (verifying() && !should_unload_classes()) {
  1997     perm_gen_verify_bit_map()->clear_all();
  2000   // Clear any data recorded in the PLAB chunk arrays.
  2001   if (_survivor_plab_array != NULL) {
  2002     reset_survivor_plab_arrays();
  2005   // Adjust the per-size allocation stats for the next epoch.
  2006   _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
  2007   // Restart the "sweep timer" for next epoch.
  2008   _sweep_timer.reset();
  2009   _sweep_timer.start();
  2011   // Sample collection pause time and reset for collection interval.
  2012   if (UseAdaptiveSizePolicy) {
  2013     size_policy()->msc_collection_end(gch->gc_cause());
  2016   // For a mark-sweep-compact, compute_new_size() will be called
  2017   // in the heap's do_collection() method.
  2020 // A work method used by the foreground collector to do
  2021 // a mark-sweep, after taking over from a possibly on-going
  2022 // concurrent mark-sweep collection.
  2023 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
  2024   CollectorState first_state, bool should_start_over) {
  2025   if (PrintGC && Verbose) {
  2026     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
  2027       "collector with count %d",
  2028       _full_gcs_since_conc_gc);
  2030   switch (_collectorState) {
  2031     case Idling:
  2032       if (first_state == Idling || should_start_over) {
  2033         // The background GC was not active, or should be
  2034         // restarted from scratch; start the cycle.
  2035         _collectorState = InitialMarking;
  2037       // If first_state was not Idling, then a background GC
  2038       // was in progress and has now finished.  No need to do it
  2039       // again.  Leave the state as Idling.
  2040       break;
  2041     case Precleaning:
  2042       // In the foreground case don't do the precleaning since
  2043       // it is not done concurrently and there is extra work
  2044       // required.
  2045       _collectorState = FinalMarking;
  2047   if (PrintGCDetails &&
  2048       (_collectorState > Idling ||
  2049        !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
  2050     gclog_or_tty->print(" (concurrent mode failure)");
  2052   collect_in_foreground(clear_all_soft_refs);
  2054   // For a mark-sweep, compute_new_size() will be called
  2055   // in the heap's do_collection() method.
  2059 void CMSCollector::getFreelistLocks() const {
  2060   // Get locks for all free lists in all generations that this
  2061   // collector is responsible for
  2062   _cmsGen->freelistLock()->lock_without_safepoint_check();
  2063   _permGen->freelistLock()->lock_without_safepoint_check();
  2066 void CMSCollector::releaseFreelistLocks() const {
  2067   // Release locks for all free lists in all generations that this
  2068   // collector is responsible for
  2069   _cmsGen->freelistLock()->unlock();
  2070   _permGen->freelistLock()->unlock();
  2073 bool CMSCollector::haveFreelistLocks() const {
  2074   // Check locks for all free lists in all generations that this
  2075   // collector is responsible for
  2076   assert_lock_strong(_cmsGen->freelistLock());
  2077   assert_lock_strong(_permGen->freelistLock());
  2078   PRODUCT_ONLY(ShouldNotReachHere());
  2079   return true;
  2082 // A utility class that is used by the CMS collector to
  2083 // temporarily "release" the foreground collector from its
  2084 // usual obligation to wait for the background collector to
  2085 // complete an ongoing phase before proceeding.
  2086 class ReleaseForegroundGC: public StackObj {
  2087  private:
  2088   CMSCollector* _c;
  2089  public:
  2090   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
  2091     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
  2092     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2093     // allow a potentially blocked foreground collector to proceed
  2094     _c->_foregroundGCShouldWait = false;
  2095     if (_c->_foregroundGCIsActive) {
  2096       CGC_lock->notify();
  2098     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  2099            "Possible deadlock");
  2102   ~ReleaseForegroundGC() {
  2103     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
  2104     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2105     _c->_foregroundGCShouldWait = true;
  2107 };
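// Usage note (illustrative): ReleaseForegroundGC is a stack-allocated RAII
// guard; its scope bounds the window during which a waiting foreground
// collector is allowed to run, as in collect_in_background() below:
//
//   {
//     ReleaseForegroundGC x(this);      // foreground may now proceed
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                   // foreground must wait again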
  2109 // There are separate collect_in_background and collect_in_foreground because of
  2110 // the different locking requirements of the background collector and the
  2111 // foreground collector.  There was originally an attempt to share
  2112 // one "collect" method between the background collector and the foreground
  2113 // collector, but the amount of if-then-else logic required made it cleaner to have
  2114 // separate methods.
  2115 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
  2116   assert(Thread::current()->is_ConcurrentGC_thread(),
  2117     "A CMS asynchronous collection is only allowed on a CMS thread.");
  2119   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2121     bool safepoint_check = Mutex::_no_safepoint_check_flag;
  2122     MutexLockerEx hl(Heap_lock, safepoint_check);
  2123     FreelistLocker fll(this);
  2124     MutexLockerEx x(CGC_lock, safepoint_check);
  2125     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
  2126       // The foreground collector is active or we're
  2127       // not using asynchronous collections.  Skip this
  2128       // background collection.
  2129       assert(!_foregroundGCShouldWait, "Should be clear");
  2130       return;
  2131     } else {
  2132       assert(_collectorState == Idling, "Should be idling before start.");
  2133       _collectorState = InitialMarking;
  2134       // Reset the expansion cause, now that we are about to begin
  2135       // a new cycle.
  2136       clear_expansion_cause();
  2138     // Decide if we want to enable class unloading as part of the
  2139     // ensuing concurrent GC cycle.
  2140     update_should_unload_classes();
  2141     _full_gc_requested = false;           // acks all outstanding full gc requests
  2142     // Signal that we are about to start a collection
  2143     gch->increment_total_full_collections();  // ... starting a collection cycle
  2144     _collection_count_start = gch->total_full_collections();
  2147   // Used for PrintGC
  2148   size_t prev_used;
  2149   if (PrintGC && Verbose) {
  2150     prev_used = _cmsGen->used(); // XXXPERM
  2153   // The change of the collection state is normally done at this level;
  2154   // the exceptions are phases that are executed while the world is
  2155   // stopped.  For those phases the change of state is done while the
  2156   // world is stopped.  For baton passing purposes this allows the
  2157   // background collector to finish the phase and change state atomically.
  2158   // The foreground collector cannot wait on a phase that is done
  2159   // while the world is stopped because the foreground collector already
  2160   // has the world stopped and would deadlock.
  2161   while (_collectorState != Idling) {
  2162     if (TraceCMSState) {
  2163       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
  2164         Thread::current(), _collectorState);
  2166     // The foreground collector
  2167     //   holds the Heap_lock throughout its collection.
  2168     //   holds the CMS token (but not the lock)
  2169     //     except while it is waiting for the background collector to yield.
  2170     //
  2171     // The foreground collector should be blocked (not for long)
  2172     //   if the background collector is about to start a phase
  2173     //   executed with world stopped.  If the background
  2174     //   collector has already started such a phase, the
  2175     //   foreground collector is blocked waiting for the
  2176     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
  2177     //   are executed in the VM thread.
  2178     //
  2179     // The locking order is
  2180     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
  2181     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
  2182     //   CMS token  (claimed in
  2183     //                stop_world_and_do() -->
  2184     //                  safepoint_synchronize() -->
  2185     //                    CMSThread::synchronize())
  2188       // Check if the FG collector wants us to yield.
  2189       CMSTokenSync x(true); // is cms thread
  2190       if (waitForForegroundGC()) {
  2191         // We yielded to a foreground GC, nothing more to be
  2192         // done this round.
  2193         assert(_foregroundGCShouldWait == false, "We set it to false in "
  2194                "waitForForegroundGC()");
  2195         if (TraceCMSState) {
  2196           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
  2197             " exiting collection CMS state %d",
  2198             Thread::current(), _collectorState);
  2200         return;
  2201       } else {
  2202         // The background collector can run but check to see if the
  2203         // foreground collector has done a collection while the
  2204         // background collector was waiting to get the CGC_lock
  2205         // above.  If yes, break so that _foregroundGCShouldWait
  2206         // is cleared before returning.
  2207         if (_collectorState == Idling) {
  2208           break;
  2213     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
  2214       "should be waiting");
  2216     switch (_collectorState) {
  2217       case InitialMarking:
  2219           ReleaseForegroundGC x(this);
  2220           stats().record_cms_begin();
  2222           VM_CMS_Initial_Mark initial_mark_op(this);
  2223           VMThread::execute(&initial_mark_op);
  2225         // The collector state may be any legal state at this point
  2226         // since the background collector may have yielded to the
  2227         // foreground collector.
  2228         break;
  2229       case Marking:
  2230         // initial marking in checkpointRootsInitialWork has been completed
  2231         if (markFromRoots(true)) { // we were successful
  2232           assert(_collectorState == Precleaning, "Collector state should "
  2233             "have changed");
  2234         } else {
  2235           assert(_foregroundGCIsActive, "Internal state inconsistency");
  2237         break;
  2238       case Precleaning:
  2239         if (UseAdaptiveSizePolicy) {
  2240           size_policy()->concurrent_precleaning_begin();
  2242         // marking from roots in markFromRoots has been completed
  2243         preclean();
  2244         if (UseAdaptiveSizePolicy) {
  2245           size_policy()->concurrent_precleaning_end();
  2247         assert(_collectorState == AbortablePreclean ||
  2248                _collectorState == FinalMarking,
  2249                "Collector state should have changed");
  2250         break;
  2251       case AbortablePreclean:
  2252         if (UseAdaptiveSizePolicy) {
  2253           size_policy()->concurrent_phases_resume();
  2255         abortable_preclean();
  2256         if (UseAdaptiveSizePolicy) {
  2257           size_policy()->concurrent_precleaning_end();
  2259         assert(_collectorState == FinalMarking, "Collector state should "
  2260           "have changed");
  2261         break;
  2262       case FinalMarking:
  2264           ReleaseForegroundGC x(this);
  2266           VM_CMS_Final_Remark final_remark_op(this);
  2267           VMThread::execute(&final_remark_op);
  2269         assert(_foregroundGCShouldWait, "block post-condition");
  2270         break;
  2271       case Sweeping:
  2272         if (UseAdaptiveSizePolicy) {
  2273           size_policy()->concurrent_sweeping_begin();
  2275         // final marking in checkpointRootsFinal has been completed
  2276         sweep(true);
  2277         assert(_collectorState == Resizing, "Collector state change "
  2278           "to Resizing must be done under the free_list_lock");
  2279         _full_gcs_since_conc_gc = 0;
  2281         // Stop the timers for adaptive size policy for the concurrent phases
  2282         if (UseAdaptiveSizePolicy) {
  2283           size_policy()->concurrent_sweeping_end();
  2284           size_policy()->concurrent_phases_end(gch->gc_cause(),
  2285                                              gch->prev_gen(_cmsGen)->capacity(),
  2286                                              _cmsGen->free());
  2289       case Resizing: {
  2290         // Sweeping has been completed...
  2291         // At this point the background collection has completed.
  2292         // Don't move the call to compute_new_size() down
  2293         // into code that might be executed if the background
  2294         // collection was preempted.
  2296           ReleaseForegroundGC x(this);   // unblock FG collection
  2297           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
  2298           CMSTokenSync        z(true);   // not strictly needed.
  2299           if (_collectorState == Resizing) {
  2300             compute_new_size();
  2301             _collectorState = Resetting;
  2302           } else {
  2303             assert(_collectorState == Idling, "The state should only change"
  2304                    " because the foreground collector has finished the collection");
  2307         break;
  2309       case Resetting:
  2310         // CMS heap resizing has been completed
  2311         reset(true);
  2312         assert(_collectorState == Idling, "Collector state should "
  2313           "have changed");
  2314         stats().record_cms_end();
  2315         // Don't move the concurrent_phases_end() and compute_new_size()
  2316         // calls to here because a preempted background collection
  2317         // has its state set to "Resetting".
  2318         break;
  2319       case Idling:
  2320       default:
  2321         ShouldNotReachHere();
  2322         break;
  2324     if (TraceCMSState) {
  2325       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
  2326         Thread::current(), _collectorState);
  2328     assert(_foregroundGCShouldWait, "block post-condition");
  2331   // Should this be in gc_epilogue?
  2332   collector_policy()->counters()->update_counters();
  2335     // Clear _foregroundGCShouldWait and, in the event that the
  2336     // foreground collector is waiting, notify it, before
  2337     // returning.
  2338     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2339     _foregroundGCShouldWait = false;
  2340     if (_foregroundGCIsActive) {
  2341       CGC_lock->notify();
  2343     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  2344            "Possible deadlock");
  2346   if (TraceCMSState) {
  2347     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
  2348       " exiting collection CMS state %d",
  2349       Thread::current(), _collectorState);
  2351   if (PrintGC && Verbose) {
  2352     _cmsGen->print_heap_change(prev_used);
  2356 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
  2357   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
  2358          "Foreground collector should be waiting, not executing");
  2359   assert(Thread::current()->is_VM_thread(), "A foreground collection "
  2360     "may only be done by the VM Thread with the world stopped");
  2361   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  2362          "VM thread should have CMS token");
  2364   NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
  2365     true, gclog_or_tty);)
  2366   if (UseAdaptiveSizePolicy) {
  2367     size_policy()->ms_collection_begin();
  2369   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
  2371   HandleMark hm;  // Discard invalid handles created during verification
  2373   if (VerifyBeforeGC &&
  2374       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2375     Universe::verify(true);
  2378   bool init_mark_was_synchronous = false; // until proven otherwise
  2379   while (_collectorState != Idling) {
  2380     if (TraceCMSState) {
  2381       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
  2382         Thread::current(), _collectorState);
  2384     switch (_collectorState) {
  2385       case InitialMarking:
  2386         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
  2387         checkpointRootsInitial(false);
  2388         assert(_collectorState == Marking, "Collector state should have changed"
  2389           " within checkpointRootsInitial()");
  2390         break;
  2391       case Marking:
  2392         // initial marking in checkpointRootsInitialWork has been completed
  2393         if (VerifyDuringGC &&
  2394             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2395           gclog_or_tty->print("Verify before initial mark: ");
  2396           Universe::verify(true);
  2399           bool res = markFromRoots(false);
  2400           assert(res && _collectorState == FinalMarking, "Collector state should "
  2401             "have changed");
  2402           break;
  2404       case FinalMarking:
  2405         if (VerifyDuringGC &&
  2406             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2407           gclog_or_tty->print("Verify before re-mark: ");
  2408           Universe::verify(true);
  2410         checkpointRootsFinal(false, clear_all_soft_refs,
  2411                              init_mark_was_synchronous);
  2412         assert(_collectorState == Sweeping, "Collector state should "
  2413           "have changed within checkpointRootsFinal()");
  2414         break;
  2415       case Sweeping:
  2416         // final marking in checkpointRootsFinal has been completed
  2417         if (VerifyDuringGC &&
  2418             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2419           gclog_or_tty->print("Verify before sweep: ");
  2420           Universe::verify(true);
  2422         sweep(false);
  2423         assert(_collectorState == Resizing, "Incorrect state");
  2424         break;
  2425       case Resizing: {
  2426         // Sweeping has been completed; the actual resize in this case
  2427         // is done separately; nothing to be done in this state.
  2428         _collectorState = Resetting;
  2429         break;
  2431       case Resetting:
  2432         // The heap has been resized.
  2433         if (VerifyDuringGC &&
  2434             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2435           gclog_or_tty->print("Verify before reset: ");
  2436           Universe::verify(true);
  2438         reset(false);
  2439         assert(_collectorState == Idling, "Collector state should "
  2440           "have changed");
  2441         break;
  2442       case Precleaning:
  2443       case AbortablePreclean:
  2444         // Elide the preclean phase
  2445         _collectorState = FinalMarking;
  2446         break;
  2447       default:
  2448         ShouldNotReachHere();
  2450     if (TraceCMSState) {
  2451       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
  2452         Thread::current(), _collectorState);
  2456   if (UseAdaptiveSizePolicy) {
  2457     GenCollectedHeap* gch = GenCollectedHeap::heap();
  2458     size_policy()->ms_collection_end(gch->gc_cause());
  2461   if (VerifyAfterGC &&
  2462       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  2463     Universe::verify(true);
  2465   if (TraceCMSState) {
  2466     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
  2467       " exiting collection CMS state %d",
  2468       Thread::current(), _collectorState);
  2472 bool CMSCollector::waitForForegroundGC() {
  2473   bool res = false;
  2474   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  2475          "CMS thread should have CMS token");
  2476   // Block the foreground collector until the
  2477   // background collector decides whether to
  2478   // yield.
  2479   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  2480   _foregroundGCShouldWait = true;
  2481   if (_foregroundGCIsActive) {
  2482     // The background collector yields to the
  2483     // foreground collector and returns a value
  2484     // indicating that it has yielded.  The foreground
  2485     // collector can proceed.
  2486     res = true;
  2487     _foregroundGCShouldWait = false;
  2488     ConcurrentMarkSweepThread::clear_CMS_flag(
  2489       ConcurrentMarkSweepThread::CMS_cms_has_token);
  2490     ConcurrentMarkSweepThread::set_CMS_flag(
  2491       ConcurrentMarkSweepThread::CMS_cms_wants_token);
  2492     // Get a possibly blocked foreground thread going
  2493     CGC_lock->notify();
  2494     if (TraceCMSState) {
  2495       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
  2496         Thread::current(), _collectorState);
  2498     while (_foregroundGCIsActive) {
  2499       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  2501     ConcurrentMarkSweepThread::set_CMS_flag(
  2502       ConcurrentMarkSweepThread::CMS_cms_has_token);
  2503     ConcurrentMarkSweepThread::clear_CMS_flag(
  2504       ConcurrentMarkSweepThread::CMS_cms_wants_token);
  2506   if (TraceCMSState) {
  2507     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
  2508       Thread::current(), _collectorState);
  2510   return res;
  2513 // Because of the need to lock the free lists and other structures in
  2514 // the collector, common to all the generations that the collector is
  2515 // collecting, we need the gc_prologues of individual CMS generations
  2516 // to delegate to their collector. It may have been simpler had the
  2517 // current infrastructure allowed one to call a prologue on a
  2518 // collector. In the absence of that we have the generation's
  2519 // prologue delegate to the collector, which delegates back
  2520 // some "local" work to a worker method in the individual generations
  2521 // that it's responsible for collecting, while itself doing any
  2522 // work common to all generations it's responsible for. A similar
  2523 // comment applies to the gc_epilogue()s.
  2524 // The role of the variable _between_prologue_and_epilogue is to
  2525 // enforce the invocation protocol.
  2526 void CMSCollector::gc_prologue(bool full) {
  2527   // Call gc_prologue_work() for each CMSGen and PermGen that
  2528   // we are responsible for.
  2530   // The following locking discipline assumes that we are only called
  2531   // when the world is stopped.
  2532   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
  2534   // The CMSCollector prologue must call the gc_prologues for the
  2535   // "generations" (including PermGen if any) that it's responsible
  2536   // for.
  2538   assert(   Thread::current()->is_VM_thread()
  2539          || (   CMSScavengeBeforeRemark
  2540              && Thread::current()->is_ConcurrentGC_thread()),
  2541          "Incorrect thread type for prologue execution");
  2543   if (_between_prologue_and_epilogue) {
  2544     // We have already been invoked; this is a gc_prologue delegation
  2545     // from yet another CMS generation that we are responsible for, just
  2546     // ignore it since all relevant work has already been done.
  2547     return;
  2550   // set a bit saying prologue has been called; cleared in epilogue
  2551   _between_prologue_and_epilogue = true;
  2552   // Claim locks for common data structures, then call gc_prologue_work()
  2553   // for each CMSGen and PermGen that we are responsible for.
  2555   getFreelistLocks();   // gets free list locks on constituent spaces
  2556   bitMapLock()->lock_without_safepoint_check();
  2558   // Should call gc_prologue_work() for all cms gens we are responsible for
  2559   bool registerClosure =    _collectorState >= Marking
  2560                          && _collectorState < Sweeping;
  2561   ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
  2562                                                : &_modUnionClosure;
  2563   _cmsGen->gc_prologue_work(full, registerClosure, muc);
  2564   _permGen->gc_prologue_work(full, registerClosure, muc);
  2566   if (!full) {
  2567     stats().record_gc0_begin();
  2571 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
  2572   // Delegate to CMScollector which knows how to coordinate between
  2573   // this and any other CMS generations that it is responsible for
  2574   // collecting.
  2575   collector()->gc_prologue(full);
  2578 // This is a "private" interface for use by this generation's CMSCollector.
  2579 // Not to be called directly by any other entity (for instance,
  2580 // GenCollectedHeap, which calls the "public" gc_prologue method above).
  2581 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
  2582   bool registerClosure, ModUnionClosure* modUnionClosure) {
  2583   assert(!incremental_collection_failed(), "Shouldn't be set yet");
  2584   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
  2585     "Should be NULL");
  2586   if (registerClosure) {
  2587     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
  2589   cmsSpace()->gc_prologue();
  2590   // Clear stat counters
  2591   NOT_PRODUCT(
  2592     assert(_numObjectsPromoted == 0, "check");
  2593     assert(_numWordsPromoted   == 0, "check");
  2594     if (Verbose && PrintGC) {
  2595       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
  2596                           SIZE_FORMAT" bytes concurrently",
  2597       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
  2599     _numObjectsAllocated = 0;
  2600     _numWordsAllocated   = 0;
  2604 void CMSCollector::gc_epilogue(bool full) {
  2605   // The following locking discipline assumes that we are only called
  2606   // when the world is stopped.
  2607   assert(SafepointSynchronize::is_at_safepoint(),
  2608          "world is stopped assumption");
  2610   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  2611   // if linear allocation blocks need to be appropriately marked to allow the
  2612 // blocks to be parsable. We also check here whether we need to nudge the
  2613   // CMS collector thread to start a new cycle (if it's not already active).
  2614   assert(   Thread::current()->is_VM_thread()
  2615          || (   CMSScavengeBeforeRemark
  2616              && Thread::current()->is_ConcurrentGC_thread()),
  2617          "Incorrect thread type for epilogue execution");
  2619   if (!_between_prologue_and_epilogue) {
  2620     // We have already been invoked; this is a gc_epilogue delegation
  2621     // from yet another CMS generation that we are responsible for, just
  2622     // ignore it since all relevant work has already been done.
  2623     return;
  2625   assert(haveFreelistLocks(), "must have freelist locks");
  2626   assert_lock_strong(bitMapLock());
  2628   _cmsGen->gc_epilogue_work(full);
  2629   _permGen->gc_epilogue_work(full);
  2631   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
  2632     // in case sampling was not already enabled, enable it
  2633     _start_sampling = true;
  2635   // reset _eden_chunk_array so sampling starts afresh
  2636   _eden_chunk_index = 0;
  2638   size_t cms_used   = _cmsGen->cmsSpace()->used();
  2639   size_t perm_used  = _permGen->cmsSpace()->used();
  2641   // update performance counters - this uses a special version of
  2642   // update_counters() that allows the utilization to be passed as a
  2643   // parameter, avoiding multiple calls to used().
  2644   //
  2645   _cmsGen->update_counters(cms_used);
  2646   _permGen->update_counters(perm_used);
  2648   if (CMSIncrementalMode) {
  2649     icms_update_allocation_limits();
  2652   bitMapLock()->unlock();
  2653   releaseFreelistLocks();
  2655   _between_prologue_and_epilogue = false;  // ready for next cycle
  2658 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
  2659   collector()->gc_epilogue(full);
  2661   // Also reset promotion tracking in par gc thread states.
  2662   if (ParallelGCThreads > 0) {
  2663     for (uint i = 0; i < ParallelGCThreads; i++) {
  2664       _par_gc_thread_states[i]->promo.stopTrackingPromotions();
  2669 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
  2670   assert(!incremental_collection_failed(), "Should have been cleared");
  2671   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
  2672   cmsSpace()->gc_epilogue();
  2673     // Print stat counters
  2674   NOT_PRODUCT(
  2675     assert(_numObjectsAllocated == 0, "check");
  2676     assert(_numWordsAllocated == 0, "check");
  2677     if (Verbose && PrintGC) {
  2678       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
  2679                           SIZE_FORMAT" bytes",
  2680                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
  2682     _numObjectsPromoted = 0;
  2683     _numWordsPromoted   = 0;
  2686   if (PrintGC && Verbose) {
  2687     // The call down the chain in contiguous_available() needs the freelistLock
  2688     // so print this out before releasing the freeListLock.
  2689     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
  2690                         contiguous_available());
  2694 #ifndef PRODUCT
  2695 bool CMSCollector::have_cms_token() {
  2696   Thread* thr = Thread::current();
  2697   if (thr->is_VM_thread()) {
  2698     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
  2699   } else if (thr->is_ConcurrentGC_thread()) {
  2700     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
  2701   } else if (thr->is_GC_task_thread()) {
  2702     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
  2703            ParGCRareEvent_lock->owned_by_self();
  2705   return false;
  2707 #endif
  2709 // Check reachability of the given heap address in CMS generation,
  2710 // treating all other generations as roots.
  2711 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
  2712   // We could "guarantee" below, rather than assert, but I'll
  2713   // leave these as "asserts" so that an adventurous debugger
  2714   // could try this in the product build provided some subset of
  2715   // the conditions were met, provided they were interested in the
  2716   // results and knew that the computation below wouldn't interfere
  2717   // with other concurrent computations mutating the structures
  2718   // being read or written.
  2719   assert(SafepointSynchronize::is_at_safepoint(),
  2720          "Else mutations in object graph will make answer suspect");
  2721   assert(have_cms_token(), "Should hold cms token");
  2722   assert(haveFreelistLocks(), "must hold free list locks");
  2723   assert_lock_strong(bitMapLock());
  2725   // Clear the marking bit map array before starting, but, just
  2726   // for kicks, first report if the given address is already marked
  2727   gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
  2728                 _markBitMap.isMarked(addr) ? "" : " not");
  2730   if (verify_after_remark()) {
  2731     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  2732     bool result = verification_mark_bm()->isMarked(addr);
  2733     gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
  2734                            result ? "IS" : "is NOT");
  2735     return result;
  2736   } else {
  2737     gclog_or_tty->print_cr("Could not compute result");
  2738     return false;
  2742 ////////////////////////////////////////////////////////
  2743 // CMS Verification Support
  2744 ////////////////////////////////////////////////////////
  2745 // Following the remark phase, the following invariant
  2746 // should hold -- each object in the CMS heap which is
  2747 // marked in markBitMap() should be marked in the verification_mark_bm().
  2749 class VerifyMarkedClosure: public BitMapClosure {
  2750   CMSBitMap* _marks;
  2751   bool       _failed;
  2753  public:
  2754   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
  2756   void do_bit(size_t offset) {
  2757     HeapWord* addr = _marks->offsetToHeapWord(offset);
  2758     if (!_marks->isMarked(addr)) {
  2759       oop(addr)->print();
  2760       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
  2761       _failed = true;
  2765   bool failed() { return _failed; }
  2766 };
  2768 bool CMSCollector::verify_after_remark() {
  2769   gclog_or_tty->print(" [Verifying CMS Marking... ");
  2770   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
  2771   static bool init = false;
  2773   assert(SafepointSynchronize::is_at_safepoint(),
  2774          "Else mutations in object graph will make answer suspect");
  2775   assert(have_cms_token(),
  2776          "Else there may be mutual interference in use of "
  2777          " verification data structures");
  2778   assert(_collectorState > Marking && _collectorState <= Sweeping,
  2779          "Else marking info checked here may be obsolete");
  2780   assert(haveFreelistLocks(), "must hold free list locks");
  2781   assert_lock_strong(bitMapLock());
  2784   // Allocate marking bit map if not already allocated
  2785   if (!init) { // first time
  2786     if (!verification_mark_bm()->allocate(_span)) {
  2787       return false;
  2789     init = true;
  2792   assert(verification_mark_stack()->isEmpty(), "Should be empty");
  2794   // Turn off refs discovery -- so we will be tracing through refs.
  2795   // This is as intended, because by this time
  2796   // GC must already have cleared any refs that need to be cleared,
  2797   // and traced those that need to be marked; moreover,
  2798   // the marking done here is not going to interfere in any
  2799   // way with the marking information used by GC.
  2800   NoRefDiscovery no_discovery(ref_processor());
  2802   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  2804   // Clear any marks from a previous round
  2805   verification_mark_bm()->clear_all();
  2806   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
  2807   assert(overflow_list_is_empty(), "overflow list should be empty");
  2809   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2810   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  2811   // Update the saved marks which may affect the root scans.
  2812   gch->save_marks();
  2814   if (CMSRemarkVerifyVariant == 1) {
  2815     // In this first variant of verification, we complete
  2816   // all marking, then check if the new marks-vector is
  2817     // a subset of the CMS marks-vector.
  2818     verify_after_remark_work_1();
  2819   } else if (CMSRemarkVerifyVariant == 2) {
  2820     // In this second variant of verification, we flag an error
  2821     // (i.e. an object reachable in the new marks-vector not reachable
  2822     // in the CMS marks-vector) immediately, also indicating the
  2823   // identity of an object (A) that references the unmarked object (B) --
  2824     // presumably, a mutation to A failed to be picked up by preclean/remark?
  2825     verify_after_remark_work_2();
  2826   } else {
  2827     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
  2828             CMSRemarkVerifyVariant);
  2830   gclog_or_tty->print(" done] ");
  2831   return true;
  2834 void CMSCollector::verify_after_remark_work_1() {
  2835   ResourceMark rm;
  2836   HandleMark  hm;
  2837   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2839   // Mark from roots one level into CMS
  2840   MarkRefsIntoClosure notOlder(_span, verification_mark_bm(), true /* nmethods */);
  2841   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  2843   gch->gen_process_strong_roots(_cmsGen->level(),
  2844                                 true,   // younger gens are roots
  2845                                 true,   // collecting perm gen
  2846                                 SharedHeap::ScanningOption(roots_scanning_options()),
  2847                                 NULL, &notOlder);
  2849   // Now mark from the roots
  2850   assert(_revisitStack.isEmpty(), "Should be empty");
  2851   MarkFromRootsClosure markFromRootsClosure(this, _span,
  2852     verification_mark_bm(), verification_mark_stack(), &_revisitStack,
  2853     false /* don't yield */, true /* verifying */);
  2854   assert(_restart_addr == NULL, "Expected pre-condition");
  2855   verification_mark_bm()->iterate(&markFromRootsClosure);
  2856   while (_restart_addr != NULL) {
  2857     // Deal with stack overflow: by restarting at the indicated
  2858     // address.
  2859     HeapWord* ra = _restart_addr;
  2860     markFromRootsClosure.reset(ra);
  2861     _restart_addr = NULL;
  2862     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  2864   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  2865   verify_work_stacks_empty();
  2866   // Should reset the revisit stack above, since no class tree
  2867   // surgery is forthcoming.
  2868   _revisitStack.reset(); // throwing away all contents
  2870   // Marking completed -- now verify that each bit marked in
  2871   // verification_mark_bm() is also marked in markBitMap(); flag all
  2872   // errors by printing corresponding objects.
  2873   VerifyMarkedClosure vcl(markBitMap());
  2874   verification_mark_bm()->iterate(&vcl);
  2875   if (vcl.failed()) {
  2876     gclog_or_tty->print("Verification failed");
  2877     Universe::heap()->print();
  2878     fatal(" ... aborting");
  2882 void CMSCollector::verify_after_remark_work_2() {
  2883   ResourceMark rm;
  2884   HandleMark  hm;
  2885   GenCollectedHeap* gch = GenCollectedHeap::heap();
  2887   // Mark from roots one level into CMS
  2888   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
  2889                                      markBitMap(), true /* nmethods */);
  2890   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  2891   gch->gen_process_strong_roots(_cmsGen->level(),
  2892                                 true,   // younger gens are roots
  2893                                 true,   // collecting perm gen
  2894                                 SharedHeap::ScanningOption(roots_scanning_options()),
  2895                                 NULL, &notOlder);
  2897   // Now mark from the roots
  2898   assert(_revisitStack.isEmpty(), "Should be empty");
  2899   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
  2900     verification_mark_bm(), markBitMap(), verification_mark_stack());
  2901   assert(_restart_addr == NULL, "Expected pre-condition");
  2902   verification_mark_bm()->iterate(&markFromRootsClosure);
  2903   while (_restart_addr != NULL) {
  2904     // Deal with stack overflow: by restarting at the indicated
  2905     // address.
  2906     HeapWord* ra = _restart_addr;
  2907     markFromRootsClosure.reset(ra);
  2908     _restart_addr = NULL;
  2909     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
  2911   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
  2912   verify_work_stacks_empty();
  2913   // Should reset the revisit stack above, since no class tree
  2914   // surgery is forthcoming.
  2915   _revisitStack.reset(); // throwing away all contents
  2917   // Marking completed -- now verify that each bit marked in
  2918   // verification_mark_bm() is also marked in markBitMap(); flag all
  2919   // errors by printing corresponding objects.
  2920   VerifyMarkedClosure vcl(markBitMap());
  2921   verification_mark_bm()->iterate(&vcl);
  2922   assert(!vcl.failed(), "Else verification above should not have succeeded");
  2925 void ConcurrentMarkSweepGeneration::save_marks() {
  2926   // delegate to CMS space
  2927   cmsSpace()->save_marks();
  2928   for (uint i = 0; i < ParallelGCThreads; i++) {
  2929     _par_gc_thread_states[i]->promo.startTrackingPromotions();
  2933 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
  2934   return cmsSpace()->no_allocs_since_save_marks();
  2937 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
  2939 void ConcurrentMarkSweepGeneration::                            \
  2940 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  2941   cl->set_generation(this);                                     \
  2942   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
  2943   cl->reset_generation();                                       \
  2944   save_marks();                                                 \
  2947 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
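       // For each closure type in ALL_SINCE_SAVE_MARKS_CLOSURES, the macro above
       // expands (roughly) to a method of the following shape, where SomeClosure
       // and the _nv suffix stand in for the actual (closure, suffix) pair:
       //
       //   void ConcurrentMarkSweepGeneration::
       //   oop_since_save_marks_iterate_nv(SomeClosure* cl) {
       //     cl->set_generation(this);
       //     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
       //     cl->reset_generation();
       //     save_marks();
       //   }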
  2949 void
  2950 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
  2952   // Not currently implemented; need to do the following. -- ysr.
  2953   // dld -- I think that is used for some sort of allocation profiler.  So it
  2954   // really means the objects allocated by the mutator since the last
  2955   // GC.  We could potentially implement this cheaply by recording only
  2956   // the direct allocations in a side data structure.
  2957   //
  2958   // I think we probably ought not to be required to support these
  2959   // iterations at any arbitrary point; I think there ought to be some
  2960   // call to enable/disable allocation profiling in a generation/space,
  2961   // and the iterator ought to return the objects allocated in the
  2962   // gen/space since the enable call, or the last iterator call (which
  2963   // will probably be at a GC.)  That way, for gens like CM&S that would
  2964   // require some extra data structure to support this, we only pay the
  2965   // cost when it's in use...
  2966   cmsSpace()->object_iterate_since_last_GC(blk);
  2969 void
  2970 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  2971   cl->set_generation(this);
  2972   younger_refs_in_space_iterate(_cmsSpace, cl);
  2973   cl->reset_generation();
  2976 void
  2977 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) {
  2978   if (freelistLock()->owned_by_self()) {
  2979     Generation::oop_iterate(mr, cl);
  2980   } else {
  2981     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  2982     Generation::oop_iterate(mr, cl);
  2986 void
  2987 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) {
  2988   if (freelistLock()->owned_by_self()) {
  2989     Generation::oop_iterate(cl);
  2990   } else {
  2991     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  2992     Generation::oop_iterate(cl);
  2996 void
  2997 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
  2998   if (freelistLock()->owned_by_self()) {
  2999     Generation::object_iterate(cl);
  3000   } else {
  3001     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3002     Generation::object_iterate(cl);
  3006 void
  3007 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
  3010 void
  3011 ConcurrentMarkSweepGeneration::post_compact() {
  3014 void
  3015 ConcurrentMarkSweepGeneration::prepare_for_verify() {
  3016   // Fix the linear allocation blocks to look like free blocks.
  3018   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  3019   // are not called when the heap is verified during universe initialization and
  3020   // at vm shutdown.
  3021   if (freelistLock()->owned_by_self()) {
  3022     cmsSpace()->prepare_for_verify();
  3023   } else {
  3024     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
  3025     cmsSpace()->prepare_for_verify();
  3029 void
  3030 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) {
  3031   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
  3032   // are not called when the heap is verified during universe initialization and
  3033   // at vm shutdown.
  3034   if (freelistLock()->owned_by_self()) {
  3035     cmsSpace()->verify(false /* ignored */);
  3036   } else {
  3037     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
  3038     cmsSpace()->verify(false /* ignored */);
  3042 void CMSCollector::verify(bool allow_dirty /* ignored */) {
  3043   _cmsGen->verify(allow_dirty);
  3044   _permGen->verify(allow_dirty);
  3047 #ifndef PRODUCT
  3048 bool CMSCollector::overflow_list_is_empty() const {
  3049   assert(_num_par_pushes >= 0, "Inconsistency");
  3050   if (_overflow_list == NULL) {
  3051     assert(_num_par_pushes == 0, "Inconsistency");
  3053   return _overflow_list == NULL;
  3056 // The methods verify_work_stacks_empty() and verify_overflow_empty()
  3057 // merely consolidate assertion checks that appear to occur together frequently.
  3058 void CMSCollector::verify_work_stacks_empty() const {
  3059   assert(_markStack.isEmpty(), "Marking stack should be empty");
  3060   assert(overflow_list_is_empty(), "Overflow list should be empty");
  3063 void CMSCollector::verify_overflow_empty() const {
  3064   assert(overflow_list_is_empty(), "Overflow list should be empty");
  3065   assert(no_preserved_marks(), "No preserved marks");
  3067 #endif // PRODUCT
  3069 // Decide if we want to enable class unloading as part of the
  3070 // ensuing concurrent GC cycle. We will collect the perm gen and
  3071 // unload classes if it's the case that:
  3072 // (1) an explicit gc request has been made and the flag
  3073 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
  3074 // (2) (a) class unloading is enabled at the command line, and
  3075 //     (b) (i)   perm gen threshold has been crossed, or
  3076 //         (ii)  old gen is getting really full, or
  3077 //         (iii) the previous N CMS collections did not collect the
  3078 //               perm gen
  3079 // NOTE: Provided there is no change in the state of the heap between
  3080 // calls to this method, it should have idempotent results. Moreover,
  3081 // its results should be monotonically increasing (i.e. going from 0 to 1,
  3082 // but not 1 to 0) between successive calls between which the heap was
  3083 // not collected. For the implementation below, it must thus rely on
  3084 // the property that concurrent_cycles_since_last_unload()
  3085 // will not decrease unless a collection cycle happened and that
  3086 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are
  3087 // themselves also monotonic in that sense. See check_monotonicity()
  3088 // below.
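       //
       // In predicate form, the decision below is (an illustrative restatement
       // of conditions (1) and (2) above):
       //
       //   should_unload_classes =
       //        (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses)
       //     || (CMSClassUnloadingEnabled
       //         && (   concurrent_cycles_since_last_unload() >= CMSClassUnloadingMaxInterval
       //             || _permGen->should_concurrent_collect()
       //             || _cmsGen->is_too_full()))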
  3089 bool CMSCollector::update_should_unload_classes() {
  3090   _should_unload_classes = false;
  3091   // Condition 1 above
  3092   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
  3093     _should_unload_classes = true;
  3094   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
  3095     // Disjuncts 2.b.(i,ii,iii) above
  3096     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
  3097                               CMSClassUnloadingMaxInterval)
  3098                            || _permGen->should_concurrent_collect()
  3099                            || _cmsGen->is_too_full();
  3101   return _should_unload_classes;
  3104 bool ConcurrentMarkSweepGeneration::is_too_full() const {
  3105   bool res = should_concurrent_collect();
  3106   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
  3107   return res;
  3110 void CMSCollector::setup_cms_unloading_and_verification_state() {
  3111   const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
  3112                              || VerifyBeforeExit;
  3113   const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
  3114                              |   SharedHeap::SO_CodeCache;
  3116   if (should_unload_classes()) {   // Should unload classes this cycle
  3117     remove_root_scanning_option(rso);  // Shrink the root set appropriately
  3118     set_verifying(should_verify);    // Set verification state for this cycle
  3119     return;                            // Nothing else needs to be done at this time
  3122   // Not unloading classes this cycle
  3123   assert(!should_unload_classes(), "Inconsistency!");
  3124   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
  3125     // We were not verifying, or we _were_ unloading classes in the last cycle,
  3126     // AND some verification options are enabled this cycle; in this case,
  3127     // we must make sure that the deadness map is allocated if not already so,
  3128     // and cleared (if already allocated previously --
  3129     // CMSBitMap::sizeInBits() is used to determine if it's allocated).
  3130     if (perm_gen_verify_bit_map()->sizeInBits() == 0) {
  3131       if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) {
  3132         warning("Failed to allocate permanent generation verification CMS Bit Map;\n"
  3133                 "permanent generation verification disabled");
  3134         return;  // Note that we leave verification disabled, so we'll retry this
  3135                  // allocation next cycle. We _could_ remember this failure
  3136                  // and skip further attempts and permanently disable verification
  3137                  // attempts if that is considered more desirable.
  3139       assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()),
  3140               "_perm_gen_ver_bit_map inconsistency?");
  3141     } else {
  3142       perm_gen_verify_bit_map()->clear_all();
  3144     // Include symbols, strings and code cache elements to prevent their resurrection.
  3145     add_root_scanning_option(rso);
  3146     set_verifying(true);
  3147   } else if (verifying() && !should_verify) {
  3148     // We were verifying, but some verification flags got disabled.
  3149     set_verifying(false);
  3150     // Exclude symbols, strings and code cache elements from root scanning to
  3151     // reduce IM and RM pauses.
  3152     remove_root_scanning_option(rso);
  3157 #ifndef PRODUCT
  3158 HeapWord* CMSCollector::block_start(const void* p) const {
  3159   const HeapWord* addr = (HeapWord*)p;
  3160   if (_span.contains(p)) {
  3161     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
  3162       return _cmsGen->cmsSpace()->block_start(p);
  3163     } else {
  3164       assert(_permGen->cmsSpace()->is_in_reserved(addr),
  3165              "Inconsistent _span?");
  3166       return _permGen->cmsSpace()->block_start(p);
  3169   return NULL;
  3171 #endif
  3173 HeapWord*
  3174 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
  3175                                                    bool   tlab,
  3176                                                    bool   parallel) {
  3177   assert(!tlab, "Can't deal with TLAB allocation");
  3178   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  3179   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
  3180     CMSExpansionCause::_satisfy_allocation);
  3181   if (GCExpandToAllocateDelayMillis > 0) {
  3182     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  3184   return have_lock_and_allocate(word_size, tlab);
  3187 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
  3188 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
  3189 // to CardGeneration and share it...
  3190 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
  3191   CMSExpansionCause::Cause cause)
  3193   assert_locked_or_safepoint(Heap_lock);
  3195   size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  3196   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  3197   bool success = false;
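         // Try the (larger) expansion hint first, then fall back to the exact
         // aligned request, and finally to whatever uncommitted space remains.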
  3198   if (aligned_expand_bytes > aligned_bytes) {
  3199     success = grow_by(aligned_expand_bytes);
  3201   if (!success) {
  3202     success = grow_by(aligned_bytes);
  3204   if (!success) {
  3205     size_t remaining_bytes = _virtual_space.uncommitted_size();
  3206     if (remaining_bytes > 0) {
  3207       success = grow_by(remaining_bytes);
  3210   if (GC_locker::is_active()) {
  3211     if (PrintGC && Verbose) {
  3212       gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
  3215   // remember why we expanded; this information is used
  3216   // by shouldConcurrentCollect() when making decisions on whether to start
  3217   // a new CMS cycle.
  3218   if (success) {
  3219     set_expansion_cause(cause);
  3220     if (PrintGCDetails && Verbose) {
  3221       gclog_or_tty->print_cr("Expanded CMS gen for %s",
  3222         CMSExpansionCause::to_string(cause));
  3227 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  3228   HeapWord* res = NULL;
  3229   MutexLocker x(ParGCRareEvent_lock);
  3230   while (true) {
  3231     // Expansion by some other thread might make alloc OK now:
  3232     res = ps->lab.alloc(word_sz);
  3233     if (res != NULL) return res;
  3234     // If there's not enough expansion space available, give up.
  3235     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
  3236       return NULL;
  3238     // Otherwise, we try expansion.
  3239     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
  3240       CMSExpansionCause::_allocate_par_lab);
  3241     // Now go around the loop and try alloc again;
  3242     // A competing par_promote might beat us to the expansion space,
  3243     // so we may go around the loop again if promotion fails again.
  3244     if (GCExpandToAllocateDelayMillis > 0) {
  3245       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  3251 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
  3252   PromotionInfo* promo) {
  3253   MutexLocker x(ParGCRareEvent_lock);
  3254   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
  3255   while (true) {
  3256     // Expansion by some other thread might make alloc OK now:
  3257     if (promo->ensure_spooling_space()) {
  3258       assert(promo->has_spooling_space(),
  3259              "Post-condition of successful ensure_spooling_space()");
  3260       return true;
  3262     // If there's not enough expansion space available, give up.
  3263     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
  3264       return false;
  3266     // Otherwise, we try expansion.
  3267     expand(refill_size_bytes, MinHeapDeltaBytes,
  3268       CMSExpansionCause::_allocate_par_spooling_space);
  3269     // Now go around the loop and try alloc again;
  3270     // A competing allocation might beat us to the expansion space,
  3271     // so we may go around the loop again if allocation fails again.
  3272     if (GCExpandToAllocateDelayMillis > 0) {
  3273       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  3280 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  3281   assert_locked_or_safepoint(Heap_lock);
  3282   size_t size = ReservedSpace::page_align_size_down(bytes);
  3283   if (size > 0) {
  3284     shrink_by(size);
  3288 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
  3289   assert_locked_or_safepoint(Heap_lock);
  3290   bool result = _virtual_space.expand_by(bytes);
  3291   if (result) {
  3292     HeapWord* old_end = _cmsSpace->end();
  3293     size_t new_word_size =
  3294       heap_word_size(_virtual_space.committed_size());
  3295     MemRegion mr(_cmsSpace->bottom(), new_word_size);
  3296     _bts->resize(new_word_size);  // resize the block offset shared array
  3297     Universe::heap()->barrier_set()->resize_covered_region(mr);
  3298     // Hmmmm... why doesn't CFLS::set_end verify locking?
  3299     // This is quite ugly; FIX ME XXX
  3300     _cmsSpace->assert_locked();
  3301     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
  3303     // update the space and generation capacity counters
  3304     if (UsePerfData) {
  3305       _space_counters->update_capacity();
  3306       _gen_counters->update_all();
  3309     if (Verbose && PrintGC) {
  3310       size_t new_mem_size = _virtual_space.committed_size();
  3311       size_t old_mem_size = new_mem_size - bytes;
  3312       gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
  3313                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
  3316   return result;
  3319 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
  3320   assert_locked_or_safepoint(Heap_lock);
  3321   bool success = true;
  3322   const size_t remaining_bytes = _virtual_space.uncommitted_size();
  3323   if (remaining_bytes > 0) {
  3324     success = grow_by(remaining_bytes);
  3325     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  3327   return success;
  3330 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  3331   assert_locked_or_safepoint(Heap_lock);
  3332   assert_lock_strong(freelistLock());
  3333   // XXX Fix when compaction is implemented.
  3334   warning("Shrinking of CMS not yet implemented");
  3335   return;
  3339 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
  3340 // phases.
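       //
       // Typical (illustrative) use, scoped to a single concurrent phase:
       //
       //   {
       //     CMSTokenSyncWithLocks ts(true, bitMapLock());
       //     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
       //     ... do the phase's work, yielding as needed ...
       //   } // destructor stops the timers and prints the phase summary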
  3341 class CMSPhaseAccounting: public StackObj {
  3342  public:
  3343   CMSPhaseAccounting(CMSCollector *collector,
  3344                      const char *phase,
  3345                      bool print_cr = true);
  3346   ~CMSPhaseAccounting();
  3348  private:
  3349   CMSCollector *_collector;
  3350   const char *_phase;
  3351   elapsedTimer _wallclock;
  3352   bool _print_cr;
  3354  public:
  3355   // Not MT-safe; so do not pass around these StackObj's
  3356   // where they may be accessed by other threads.
  3357   jlong wallclock_millis() {
  3358     assert(_wallclock.is_active(), "Wall clock should not stop");
  3359     _wallclock.stop();  // to record time
  3360     jlong ret = _wallclock.milliseconds();
  3361     _wallclock.start(); // restart
  3362     return ret;
  3364 };
  3366 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
  3367                                        const char *phase,
  3368                                        bool print_cr) :
  3369   _collector(collector), _phase(phase), _print_cr(print_cr) {
  3371   if (PrintCMSStatistics != 0) {
  3372     _collector->resetYields();
  3374   if (PrintGCDetails && PrintGCTimeStamps) {
  3375     gclog_or_tty->date_stamp(PrintGCDateStamps);
  3376     gclog_or_tty->stamp();
  3377     gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
  3378       _collector->cmsGen()->short_name(), _phase);
  3380   _collector->resetTimer();
  3381   _wallclock.start();
  3382   _collector->startTimer();
  3385 CMSPhaseAccounting::~CMSPhaseAccounting() {
  3386   assert(_wallclock.is_active(), "Wall clock should not have stopped");
  3387   _collector->stopTimer();
  3388   _wallclock.stop();
  3389   if (PrintGCDetails) {
  3390     gclog_or_tty->date_stamp(PrintGCDateStamps);
  3391     if (PrintGCTimeStamps) {
  3392       gclog_or_tty->stamp();
  3393       gclog_or_tty->print(": ");
  3395     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
  3396                  _collector->cmsGen()->short_name(),
  3397                  _phase, _collector->timerValue(), _wallclock.seconds());
  3398     if (_print_cr) {
  3399       gclog_or_tty->print_cr("");
  3401     if (PrintCMSStatistics != 0) {
  3402       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
  3403                     _collector->yields());
  3408 // CMS work
  3410 // Checkpoint the roots into this generation from outside
  3411 // this generation. [Note this initial checkpoint need only
  3412 // be approximate -- we'll do a catch up phase subsequently.]
  3413 void CMSCollector::checkpointRootsInitial(bool asynch) {
  3414   assert(_collectorState == InitialMarking, "Wrong collector state");
  3415   check_correct_thread_executing();
  3416   ReferenceProcessor* rp = ref_processor();
  3417   SpecializationStats::clear();
  3418   assert(_restart_addr == NULL, "Control point invariant");
  3419   if (asynch) {
  3420     // acquire locks for subsequent manipulations
  3421     MutexLockerEx x(bitMapLock(),
  3422                     Mutex::_no_safepoint_check_flag);
  3423     checkpointRootsInitialWork(asynch);
  3424     rp->verify_no_references_recorded();
  3425     rp->enable_discovery(); // enable ("weak") refs discovery
  3426     _collectorState = Marking;
  3427   } else {
  3428     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
  3429     // which recognizes if we are a CMS generation, and doesn't try to turn on
  3430     // discovery; verify that they aren't meddling.
  3431     assert(!rp->discovery_is_atomic(),
  3432            "incorrect setting of discovery predicate");
  3433     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
  3434            "ref discovery for this generation kind");
  3435     // already have locks
  3436     checkpointRootsInitialWork(asynch);
  3437     rp->enable_discovery(); // now enable ("weak") refs discovery
  3438     _collectorState = Marking;
  3440   SpecializationStats::print();
  3443 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
  3444   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  3445   assert(_collectorState == InitialMarking, "just checking");
  3447   // If there has not been a GC[n-1] since last GC[n] cycle completed,
  3448   // precede our marking with a collection of all
  3449   // younger generations to keep floating garbage to a minimum.
  3450   // XXX: we won't do this for now -- it's an optimization to be done later.
  3452   // already have locks
  3453   assert_lock_strong(bitMapLock());
  3454   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
  3456   // Setup the verification and class unloading state for this
  3457   // CMS collection cycle.
  3458   setup_cms_unloading_and_verification_state();
  3460   NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
  3461     PrintGCDetails && Verbose, true, gclog_or_tty);)
  3462   if (UseAdaptiveSizePolicy) {
  3463     size_policy()->checkpoint_roots_initial_begin();
  3466   // Reset all the PLAB chunk arrays if necessary.
  3467   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
  3468     reset_survivor_plab_arrays();
  3471   ResourceMark rm;
  3472   HandleMark  hm;
  3474   FalseClosure falseClosure;
  3475   // In the case of a synchronous collection, we will elide the
  3476   // remark step, so it's important to catch all the nmethod oops
  3477   // in this step; hence the last argument to the constructor below.
  3478   MarkRefsIntoClosure notOlder(_span, &_markBitMap, !asynch /* nmethods */);
  3479   GenCollectedHeap* gch = GenCollectedHeap::heap();
  3481   verify_work_stacks_empty();
  3482   verify_overflow_empty();
  3484   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
  3485   // Update the saved marks which may affect the root scans.
  3486   gch->save_marks();
  3488   // weak reference processing has not started yet.
  3489   ref_processor()->set_enqueuing_is_done(false);
  3492     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  3493     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  3494     gch->gen_process_strong_roots(_cmsGen->level(),
  3495                                   true,   // younger gens are roots
  3496                                   true,   // collecting perm gen
  3497                                   SharedHeap::ScanningOption(roots_scanning_options()),
  3498                                   NULL, &notOlder);
  3501   // Clear mod-union table; it will be dirtied in the prologue of
  3502   // CMS generation for each younger generation collection.
  3504   assert(_modUnionTable.isAllClear(),
  3505        "Was cleared in most recent final checkpoint phase"
  3506        " or no bits are set in the gc_prologue before the start of the next "
  3507        "subsequent marking phase.");
  3509   // Temporarily disabled, since pre/post-consumption closures don't
  3510   // care about precleaned cards
  3511   #if 0
  3513     MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
  3514                              (HeapWord*)_virtual_space.high());
  3515     _ct->ct_bs()->preclean_dirty_cards(mr);
  3517   #endif
  3519   // Save the end of the used_region of the constituent generations
  3520   // to be used to limit the extent of sweep in each generation.
  3521   save_sweep_limits();
  3522   if (UseAdaptiveSizePolicy) {
  3523     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
  3525   verify_overflow_empty();
  3528 bool CMSCollector::markFromRoots(bool asynch) {
  3529   // we might be tempted to assert that:
  3530   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
  3531   //        "inconsistent argument?");
  3532   // However that wouldn't be right, because it's possible that
  3533   // a safepoint is indeed in progress as a younger generation
  3534   // stop-the-world GC happens even as we mark in this generation.
  3535   assert(_collectorState == Marking, "inconsistent state?");
  3536   check_correct_thread_executing();
  3537   verify_overflow_empty();
  3539   bool res;
  3540   if (asynch) {
  3542     // Start the timers for adaptive size policy for the concurrent phases
  3543     // Do it here so that the foreground MS can use the concurrent
  3544     // timer since a foreground MS might have the sweep done concurrently
  3545     // or STW.
  3546     if (UseAdaptiveSizePolicy) {
  3547       size_policy()->concurrent_marking_begin();
  3550     // Weak ref discovery note: We may be discovering weak
  3551     // refs in this generation concurrently (but interleaved) with
  3552     // weak ref discovery by a younger generation collector.
  3554     CMSTokenSyncWithLocks ts(true, bitMapLock());
  3555     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  3556     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
  3557     res = markFromRootsWork(asynch);
  3558     if (res) {
  3559       _collectorState = Precleaning;
  3560     } else { // We failed and a foreground collection wants to take over
  3561       assert(_foregroundGCIsActive, "internal state inconsistency");
  3562       assert(_restart_addr == NULL,  "foreground will restart from scratch");
  3563       if (PrintGCDetails) {
  3564         gclog_or_tty->print_cr("bailing out to foreground collection");
  3567     if (UseAdaptiveSizePolicy) {
  3568       size_policy()->concurrent_marking_end();
  3570   } else {
  3571     assert(SafepointSynchronize::is_at_safepoint(),
  3572            "inconsistent with asynch == false");
  3573     if (UseAdaptiveSizePolicy) {
  3574       size_policy()->ms_collection_marking_begin();
  3576     // already have locks
  3577     res = markFromRootsWork(asynch);
  3578     _collectorState = FinalMarking;
  3579     if (UseAdaptiveSizePolicy) {
  3580       GenCollectedHeap* gch = GenCollectedHeap::heap();
  3581       size_policy()->ms_collection_marking_end(gch->gc_cause());
  3584   verify_overflow_empty();
  3585   return res;
  3588 bool CMSCollector::markFromRootsWork(bool asynch) {
  3589   // iterate over marked bits in bit map, doing a full scan and mark
  3590   // from these roots using the following algorithm:
  3591   // . if oop is to the right of the current scan pointer,
  3592   //   mark corresponding bit (we'll process it later)
  3593   // . else (oop is to left of current scan pointer)
  3594   //   push oop on marking stack
  3595   // . drain the marking stack
  3597   // Note that when we do a marking step we need to hold the
  3598   // bit map lock -- recall that direct allocation (by mutators)
  3599   // and promotion (by younger generation collectors) is also
  3600   // marking the bit map. [the so-called allocate live policy.]
  3601   // Because the implementation of bit map marking is not
  3602   // robust wrt simultaneous marking of bits in the same word,
  3603   // we need to make sure that there is no such interference
  3604   // between concurrent such updates.
  3606   // already have locks
  3607   assert_lock_strong(bitMapLock());
  3609   // Clear the revisit stack, just in case there are any
  3610   // obsolete contents from a short-circuited previous CMS cycle.
  3611   _revisitStack.reset();
  3612   verify_work_stacks_empty();
  3613   verify_overflow_empty();
  3614   assert(_revisitStack.isEmpty(), "tabula rasa");
  3616   bool result = false;
  3617   if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
  3618     result = do_marking_mt(asynch);
  3619   } else {
  3620     result = do_marking_st(asynch);
  3622   return result;
  3625 // Forward decl
  3626 class CMSConcMarkingTask;
  3628 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  3629   CMSCollector*       _collector;
  3630   CMSConcMarkingTask* _task;
  3631   bool _yield;
  3632  protected:
  3633   virtual void yield();
  3634  public:
  3635   // "n_threads" is the number of threads to be terminated.
  3636   // "queue_set" is a set of work queues of other threads.
  3637   // "collector" is the CMS collector associated with this task terminator.
  3638   // "yield" indicates whether we need the gang as a whole to yield.
  3639   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set,
  3640                            CMSCollector* collector, bool yield) :
  3641     ParallelTaskTerminator(n_threads, queue_set),
  3642     _collector(collector),
  3643     _yield(yield) { }
  3645   void set_task(CMSConcMarkingTask* task) {
  3646     _task = task;
  3648 };
  3650 // MT Concurrent Marking Task
  3651 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  3652   CMSCollector* _collector;
  3653   YieldingFlexibleWorkGang* _workers;        // the whole gang
  3654   int           _n_workers;                  // requested/desired # workers
  3655   bool          _asynch;
  3656   bool          _result;
  3657   CompactibleFreeListSpace*  _cms_space;
  3658   CompactibleFreeListSpace* _perm_space;
  3659   HeapWord*     _global_finger;
  3661   //  Exposed here for yielding support
  3662   Mutex* const _bit_map_lock;
  3664   // The per thread work queues, available here for stealing
  3665   OopTaskQueueSet*  _task_queues;
  3666   CMSConcMarkingTerminator _term;
  3668  public:
  3669   CMSConcMarkingTask(CMSCollector* collector,
  3670                  CompactibleFreeListSpace* cms_space,
  3671                  CompactibleFreeListSpace* perm_space,
  3672                  bool asynch, int n_workers,
  3673                  YieldingFlexibleWorkGang* workers,
  3674                  OopTaskQueueSet* task_queues):
  3675     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
  3676     _collector(collector),
  3677     _cms_space(cms_space),
  3678     _perm_space(perm_space),
  3679     _asynch(asynch), _n_workers(n_workers), _result(true),
  3680     _workers(workers), _task_queues(task_queues),
  3681     _term(n_workers, task_queues, _collector, asynch),
  3682     _bit_map_lock(collector->bitMapLock())
  3684     assert(n_workers <= workers->total_workers(),
  3685            "Else termination won't work correctly today"); // XXX FIX ME!
  3686     _requested_size = n_workers;
  3687     _term.set_task(this);
  3688     assert(_cms_space->bottom() < _perm_space->bottom(),
  3689            "Finger incorrectly initialized below");
  3690     _global_finger = _cms_space->bottom();
  3694   OopTaskQueueSet* task_queues()  { return _task_queues; }
  3696   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
  3698   HeapWord** global_finger_addr() { return &_global_finger; }
  3700   CMSConcMarkingTerminator* terminator() { return &_term; }
  3702   void work(int i);
  3704   virtual void coordinator_yield();  // stuff done by coordinator
  3705   bool result() { return _result; }
  3707   void reset(HeapWord* ra) {
  3708     _term.reset_for_reuse();
  3711   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
  3712                                            OopTaskQueue* work_q);
  3714  private:
  3715   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
  3716   void do_work_steal(int i);
  3717   void bump_global_finger(HeapWord* f);
  3718 };
  3720 void CMSConcMarkingTerminator::yield() {
  3721   if (ConcurrentMarkSweepThread::should_yield() &&
  3722       !_collector->foregroundGCIsActive() &&
  3723       _yield) {
  3724     _task->yield();
  3725   } else {
  3726     ParallelTaskTerminator::yield();
  3730 ////////////////////////////////////////////////////////////////
  3731 // Concurrent Marking Algorithm Sketch
  3732 ////////////////////////////////////////////////////////////////
  3733 // Until all tasks exhausted (both spaces):
  3734 // -- claim next available chunk
  3735 // -- bump global finger via CAS
  3736 // -- find first object that starts in this chunk
  3737 //    and start scanning bitmap from that position
  3738 // -- scan marked objects for oops
  3739 // -- CAS-mark target, and if successful:
  3740 //    . if target oop is above global finger (volatile read)
  3741 //      nothing to do
  3742 //    . if target oop is in chunk and above local finger
  3743 //        then nothing to do
  3744 //    . else push on work-queue
  3745 // -- Deal with possible overflow issues:
  3746 //    . local work-queue overflow causes stuff to be pushed on
  3747 //      global (common) overflow queue
  3748 //    . always first empty local work queue
  3749 //    . then get a batch of oops from global work queue if any
  3750 //    . then do work stealing
  3751 // -- When all tasks claimed (both spaces)
  3752 //    and local work queue empty,
  3753 //    then in a loop do:
  3754 //    . check global overflow stack; steal a batch of oops and trace
  3755 //    . try to steal from other threads if GOS is empty
  3756 //    . if neither is available, offer termination
  3757 // -- Terminate and return result
  3758 //
  3759 void CMSConcMarkingTask::work(int i) {
  3760   elapsedTimer _timer;
  3761   ResourceMark rm;
  3762   HandleMark hm;
  3764   DEBUG_ONLY(_collector->verify_overflow_empty();)
  3766   // Before we begin work, our work queue should be empty
  3767   assert(work_queue(i)->size() == 0, "Expected to be empty");
  3768   // Scan the bitmap covering _cms_space, tracing through grey objects.
  3769   _timer.start();
  3770   do_scan_and_mark(i, _cms_space);
  3771   _timer.stop();
  3772   if (PrintCMSStatistics != 0) {
  3773     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
  3774       i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
  3777   // ... do the same for the _perm_space
  3778   _timer.reset();
  3779   _timer.start();
  3780   do_scan_and_mark(i, _perm_space);
  3781   _timer.stop();
  3782   if (PrintCMSStatistics != 0) {
  3783     gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
  3784       i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
  3787   // ... do work stealing
  3788   _timer.reset();
  3789   _timer.start();
  3790   do_work_steal(i);
  3791   _timer.stop();
  3792   if (PrintCMSStatistics != 0) {
  3793     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
  3794       i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
  3796   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
  3797   assert(work_queue(i)->size() == 0, "Should have been emptied");
  3798   // Note that under the current task protocol, the
  3799   // following assertion is true even if the spaces have
  3800   // expanded since the completion of the concurrent
  3801   // marking. XXX This will likely change under a strict
  3802   // ABORT semantics.
  3803   assert(_global_finger >  _cms_space->end() &&
  3804          _global_finger >= _perm_space->end(),
  3805          "All tasks have been completed");
  3806   DEBUG_ONLY(_collector->verify_overflow_empty();)
  3809 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
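         // Advance _global_finger to at least f. Several worker threads may race
         // to bump the finger, so loop: re-read the current value and retry the
         // CAS only while f is still ahead of the most recently observed value.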
  3810   HeapWord* read = _global_finger;
  3811   HeapWord* cur  = read;
  3812   while (f > read) {
  3813     cur = read;
  3814     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
  3815     if (cur == read) {
  3816       // our cas succeeded
  3817       assert(_global_finger >= f, "protocol consistency");
  3818       break;
  3823 // This is really inefficient, and should be redone by
  3824 // using (not yet available) block-read and -write interfaces to the
  3825 // stack and the work_queue. XXX FIX ME !!!
  3826 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
  3827                                                       OopTaskQueue* work_q) {
  3828   // Fast lock-free check
  3829   if (ovflw_stk->length() == 0) {
  3830     return false;
  3832   assert(work_q->size() == 0, "Shouldn't steal");
  3833   MutexLockerEx ml(ovflw_stk->par_lock(),
  3834                    Mutex::_no_safepoint_check_flag);
  3835   // Grab up to 1/4 the size of the work queue
  3836   size_t num = MIN2((size_t)work_q->max_elems()/4,
  3837                     (size_t)ParGCDesiredObjsFromOverflowList);
  3838   num = MIN2(num, ovflw_stk->length());
  3839   for (int i = (int) num; i > 0; i--) {
  3840     oop cur = ovflw_stk->pop();
  3841     assert(cur != NULL, "Counted wrong?");
  3842     work_q->push(cur);
  3844   return num > 0;
  3847 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
  3848   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  3849   int n_tasks = pst->n_tasks();
  3850   // We allow that there may be no tasks to do here because
  3851   // we are restarting after a stack overflow.
  3852   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
  3853   int nth_task = 0;
  3855   HeapWord* start = sp->bottom();
  3856   size_t chunk_size = sp->marking_task_size();
  3857   while (!pst->is_task_claimed(/* reference */ nth_task)) {
  3858     // Having claimed the nth task in this space,
  3859     // compute the chunk that it corresponds to:
  3860     MemRegion span = MemRegion(start + nth_task*chunk_size,
  3861                                start + (nth_task+1)*chunk_size);
  3862     // Try and bump the global finger via a CAS;
  3863     // note that we need to do the global finger bump
  3864     // _before_ taking the intersection below, because
  3865     // the task corresponding to that region will be
  3866     // deemed done even if the used_region() expands
  3867     // because of allocation -- as it almost certainly will
  3868     // during start-up while the threads yield in the
  3869     // closure below.
  3870     HeapWord* finger = span.end();
  3871     bump_global_finger(finger);   // atomically
  3872     // There are null tasks here corresponding to chunks
  3873     // beyond the "top" address of the space.
  3874     span = span.intersection(sp->used_region());
  3875     if (!span.is_empty()) {  // Non-null task
  3876       // We want to skip the first object because
  3877       // the protocol is to scan any object in its entirety
  3878       // that _starts_ in this span; a fortiori, any
  3879       // object starting in an earlier span is scanned
  3880       // as part of an earlier claimed task.
  3881       // Below we use the "careful" version of block_start
  3882       // so we do not try to navigate uninitialized objects.
  3883       HeapWord* prev_obj = sp->block_start_careful(span.start());
  3884       // Below we use a variant of block_size that uses the
  3885       // Printezis bits to avoid waiting for allocated
  3886       // objects to become initialized/parsable.
  3887       while (prev_obj < span.start()) {
  3888         size_t sz = sp->block_size_no_stall(prev_obj, _collector);
  3889         if (sz > 0) {
  3890           prev_obj += sz;
  3891         } else {
  3892           // In this case we may end up doing a bit of redundant
  3893           // scanning, but that appears unavoidable, short of
  3894           // locking the free list locks; see bug 6324141.
  3895           break;
  3898       if (prev_obj < span.end()) {
  3899         MemRegion my_span = MemRegion(prev_obj, span.end());
  3900         // Do the marking work within a non-empty span --
  3901         // the last argument to the constructor indicates whether the
  3902         // iteration should be incremental with periodic yields.
  3903         Par_MarkFromRootsClosure cl(this, _collector, my_span,
  3904                                     &_collector->_markBitMap,
  3905                                     work_queue(i),
  3906                                     &_collector->_markStack,
  3907                                     &_collector->_revisitStack,
  3908                                     _asynch);
  3909         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
  3910       } // else nothing to do for this task
  3911     }   // else nothing to do for this task
  3913   // We'd be tempted to assert here that since there are no
  3914   // more tasks left to claim in this space, the global_finger
  3915   // must exceed space->top() and a fortiori space->end(). However,
  3916   // that would not quite be correct because the bumping of
  3917   // global_finger occurs strictly after the claiming of a task,
  3918   // so by the time we reach here the global finger may not yet
  3919   // have been bumped up by the thread that claimed the last
  3920   // task.
  3921   pst->all_tasks_completed();
  3924 class Par_ConcMarkingClosure: public OopClosure {
  3925  private:
  3926   CMSCollector* _collector;
  3927   MemRegion     _span;
  3928   CMSBitMap*    _bit_map;
  3929   CMSMarkStack* _overflow_stack;
  3930   CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
  3931   OopTaskQueue* _work_queue;
  3932  protected:
  3933   DO_OOP_WORK_DEFN
  3934  public:
  3935   Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
  3936                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
  3937     _collector(collector),
  3938     _span(_collector->_span),
  3939     _work_queue(work_queue),
  3940     _bit_map(bit_map),
  3941     _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
  3942   virtual void do_oop(oop* p);
  3943   virtual void do_oop(narrowOop* p);
  3944   void trim_queue(size_t max);
  3945   void handle_stack_overflow(HeapWord* lost);
  3946 };
  3948 // Grey object rescan during work stealing phase --
  3949 // the salient assumption here is that stolen oops must
  3950 // always be initialized, so we do not need to check for
  3951 // uninitialized objects before scanning here.
  3952 void Par_ConcMarkingClosure::do_oop(oop obj) {
  3953   assert(obj->is_oop_or_null(), "expected an oop or NULL");
  3954   HeapWord* addr = (HeapWord*)obj;
  3955   // Check if oop points into the CMS generation
  3956   // and is not marked
  3957   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
  3958     // a white object ...
  3959     // If we manage to "claim" the object, by being the
  3960     // first thread to mark it, then we push it on our
  3961     // marking stack
  3962     if (_bit_map->par_mark(addr)) {     // ... now grey
  3963       // push on work queue (grey set)
  3964       bool simulate_overflow = false;
  3965       NOT_PRODUCT(
  3966         if (CMSMarkStackOverflowALot &&
  3967             _collector->simulate_overflow()) {
  3968           // simulate a stack overflow
  3969           simulate_overflow = true;
  3972       if (simulate_overflow ||
  3973           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
  3974         // stack overflow
  3975         if (PrintCMSStatistics != 0) {
  3976           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  3977                                  SIZE_FORMAT, _overflow_stack->capacity());
  3979         // We cannot assert that the overflow stack is full because
  3980         // it may have been emptied since.
  3981         assert(simulate_overflow ||
  3982                _work_queue->size() == _work_queue->max_elems(),
  3983               "Else push should have succeeded");
  3984         handle_stack_overflow(addr);
  3986     } // Else, some other thread got there first
  3990 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
  3991 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
  3993 void Par_ConcMarkingClosure::trim_queue(size_t max) {
  3994   while (_work_queue->size() > max) {
  3995     oop new_oop;
  3996     if (_work_queue->pop_local(new_oop)) {
  3997       assert(new_oop->is_oop(), "Should be an oop");
  3998       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
  3999       assert(_span.contains((HeapWord*)new_oop), "Not in span");
  4000       assert(new_oop->is_parsable(), "Should be parsable");
  4001       new_oop->oop_iterate(this);  // do_oop() above
  4006 // Upon stack overflow, we discard (part of) the stack,
  4007 // remembering the least address amongst those discarded
  4008 // in CMSCollector's _restart_address.
  4009 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
  4010   // We need to do this under a mutex to prevent other
  4011   // workers from interfering with the expansion below.
  4012   MutexLockerEx ml(_overflow_stack->par_lock(),
  4013                    Mutex::_no_safepoint_check_flag);
  4014   // Remember the least grey address discarded
  4015   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  4016   _collector->lower_restart_addr(ra);
  4017   _overflow_stack->reset();  // discard stack contents
  4018   _overflow_stack->expand(); // expand the stack if possible
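// do_work_steal() below is the drain-and-steal loop each worker runs
// once its initial scanning tasks are exhausted: (1) drain the local
// work queue, applying Par_ConcMarkingClosure to each oop; (2) if the
// shared overflow stack has work, refill the local queue from it and
// repeat; (3) otherwise try to steal from another worker's queue;
// (4) if stealing also fails, offer termination and exit only when all
// workers have agreed that no work remains.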
  4022 void CMSConcMarkingTask::do_work_steal(int i) {
  4023   OopTaskQueue* work_q = work_queue(i);
  4024   oop obj_to_scan;
  4025   CMSBitMap* bm = &(_collector->_markBitMap);
  4026   CMSMarkStack* ovflw = &(_collector->_markStack);
  4027   int* seed = _collector->hash_seed(i);
  4028   Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
  4029   while (true) {
  4030     cl.trim_queue(0);
  4031     assert(work_q->size() == 0, "Should have been emptied above");
  4032     if (get_work_from_overflow_stack(ovflw, work_q)) {
  4033       // Can't assert below because the work obtained from the
  4034       // overflow stack may already have been stolen from us.
  4035       // assert(work_q->size() > 0, "Work from overflow stack");
  4036       continue;
  4037     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
  4038       assert(obj_to_scan->is_oop(), "Should be an oop");
  4039       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
  4040       obj_to_scan->oop_iterate(&cl);
  4041     } else if (terminator()->offer_termination()) {
  4042       assert(work_q->size() == 0, "Impossible!");
  4043       break;
  4048 // This is run by the CMS (coordinator) thread.
  4049 void CMSConcMarkingTask::coordinator_yield() {
  4050   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  4051          "CMS thread should hold CMS token");
  4053   // First give up the locks, then yield, then re-lock
  4054   // We should probably use a constructor/destructor idiom to
  4055   // do this unlock/lock or modify the MutexUnlocker class to
  4056   // serve our purpose. XXX
  4057   assert_lock_strong(_bit_map_lock);
  4058   _bit_map_lock->unlock();
  4059   ConcurrentMarkSweepThread::desynchronize(true);
  4060   ConcurrentMarkSweepThread::acknowledge_yield_request();
  4061   _collector->stopTimer();
  4062   if (PrintCMSStatistics != 0) {
  4063     _collector->incrementYields();
  4065   _collector->icms_wait();
  4067   // It is possible for whichever thread initiated the yield request
  4068   // not to get a chance to wake up and take the bitmap lock between
  4069   // this thread releasing it and reacquiring it. So, while the
  4070   // should_yield() flag is on, let's sleep for a bit to give the
  4071   // other thread a chance to wake up. The limit imposed on the number
  4072   // of iterations is defensive, to avoid any unforeseen circumstances
  4073   // putting us into an infinite loop. Since it's always been this
  4074   // (coordinator_yield()) method that was observed to cause the
  4075   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
  4076   // which is by default non-zero. For the other seven methods that
  4077   // also perform the yield operation, we are using a different
  4078   // parameter (CMSYieldSleepCount) which is by default zero. This way we
  4079   // can enable the sleeping for those methods too, if necessary.
  4080   // See 6442774.
  4081   //
  4082   // We really need to reconsider the synchronization between the GC
  4083   // thread and the yield-requesting threads in the future and we
  4084   // should really use wait/notify, which is the recommended
  4085   // way of doing this type of interaction. Additionally, we should
  4086   // consolidate the eight methods that do the yield operation, which
  4087   // are almost identical, into one for better maintainability and
  4088   // readability. See 6445193.
  4089   //
  4090   // Tony 2006.06.29
  4091   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
  4092                    ConcurrentMarkSweepThread::should_yield() &&
  4093                    !CMSCollector::foregroundGCIsActive(); ++i) {
  4094     os::sleep(Thread::current(), 1, false);
  4095     ConcurrentMarkSweepThread::acknowledge_yield_request();
  4098   ConcurrentMarkSweepThread::synchronize(true);
  4099   _bit_map_lock->lock_without_safepoint_check();
  4100   _collector->startTimer();
  4103 bool CMSCollector::do_marking_mt(bool asynch) {
  4104   assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
  4105   // In the future this would be determined ergonomically, based
  4106   // on #cpu's, # active mutator threads (and load), and mutation rate.
  4107   int num_workers = ParallelCMSThreads;
  4109   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  4110   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
  4112   CMSConcMarkingTask tsk(this, cms_space, perm_space,
  4113                          asynch, num_workers /* number requested XXX */,
  4114                          conc_workers(), task_queues());
  4116   // Since the actual number of workers we get may be different
  4117   // from the number we requested above, do we need to do anything different
  4118   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
  4119   // class?? XXX
  4120   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
  4121   perm_space->initialize_sequential_subtasks_for_marking(num_workers);
  4123   // Refs discovery is already non-atomic.
  4124   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
  4125   // Mutate the Refs discovery so it is MT during the
  4126   // multi-threaded marking phase.
  4127   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
  4129   conc_workers()->start_task(&tsk);
  4130   while (tsk.yielded()) {
  4131     tsk.coordinator_yield();
  4132     conc_workers()->continue_task(&tsk);
  4134   // If the task was aborted, _restart_addr will be non-NULL
  4135   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
  4136   while (_restart_addr != NULL) {
  4137     // XXX For now we do not make use of ABORTED state and have not
  4138     // yet implemented the right abort semantics (even in the original
  4139     // single-threaded CMS case). That needs some more investigation
  4140     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
  4141     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
  4142     // If _restart_addr is non-NULL, a marking stack overflow
  4143     // occurred; we need to do a fresh marking iteration from the
  4144     // indicated restart address.
  4145     if (_foregroundGCIsActive && asynch) {
  4146       // We may be running into repeated stack overflows, having
  4147       // reached the limit of the stack size, while making very
  4148       // slow forward progress. It may be best to bail out and
  4149       // let the foreground collector do its job.
  4150       // Clear _restart_addr, so that foreground GC
  4151       // works from scratch. This avoids the headache of
  4152       // a "rescan" which would otherwise be needed because
  4153       // of the dirty mod union table & card table.
  4154       _restart_addr = NULL;
  4155       return false;
  4157     // Adjust the task to restart from _restart_addr
  4158     tsk.reset(_restart_addr);
  4159     cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
  4160                   _restart_addr);
  4161     perm_space->initialize_sequential_subtasks_for_marking(num_workers,
  4162                   _restart_addr);
  4163     _restart_addr = NULL;
  4164     // Get the workers going again
  4165     conc_workers()->start_task(&tsk);
  4166     while (tsk.yielded()) {
  4167       tsk.coordinator_yield();
  4168       conc_workers()->continue_task(&tsk);
  4171   assert(tsk.completed(), "Inconsistency");
  4172   assert(tsk.result() == true, "Inconsistency");
  4173   return true;
  4176 bool CMSCollector::do_marking_st(bool asynch) {
  4177   ResourceMark rm;
  4178   HandleMark   hm;
  4180   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
  4181     &_markStack, &_revisitStack, CMSYield && asynch);
  4182   // the last argument to iterate indicates whether the iteration
  4183   // should be incremental with periodic yields.
  4184   _markBitMap.iterate(&markFromRootsClosure);
  4185   // If _restart_addr is non-NULL, a marking stack overflow
  4186   // occurred; we need to do a fresh iteration from the
  4187   // indicated restart address.
  4188   while (_restart_addr != NULL) {
  4189     if (_foregroundGCIsActive && asynch) {
  4190       // We may be running into repeated stack overflows, having
  4191       // reached the limit of the stack size, while making very
  4192       // slow forward progress. It may be best to bail out and
  4193       // let the foreground collector do its job.
  4194       // Clear _restart_addr, so that foreground GC
  4195       // works from scratch. This avoids the headache of
  4196       // a "rescan" which would otherwise be needed because
  4197       // of the dirty mod union table & card table.
  4198       _restart_addr = NULL;
  4199       return false;  // indicating failure to complete marking
  4201     // Deal with stack overflow:
  4202     // we restart marking from _restart_addr
  4203     HeapWord* ra = _restart_addr;
  4204     markFromRootsClosure.reset(ra);
  4205     _restart_addr = NULL;
  4206     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
  4208   return true;
  4211 void CMSCollector::preclean() {
  4212   check_correct_thread_executing();
  4213   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
  4214   verify_work_stacks_empty();
  4215   verify_overflow_empty();
  4216   _abort_preclean = false;
  4217   if (CMSPrecleaningEnabled) {
  4218     _eden_chunk_index = 0;
  4219     size_t used = get_eden_used();
  4220     size_t capacity = get_eden_capacity();
  4221     // Don't start sampling unless we will get sufficiently
  4222     // many samples.
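    // Up to integer rounding, the test below asks whether
    //   used < (capacity * CMSScheduleRemarkEdenPenetration / 100)
    //           / CMSScheduleRemarkSamplingRatio
    // i.e. whether current eden occupancy is at most 1/SamplingRatio of
    // the occupancy at which the remark is to be scheduled, so that we
    // can expect roughly CMSScheduleRemarkSamplingRatio sampling
    // opportunities before that trigger point is reached.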
  4223     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
  4224                 * CMSScheduleRemarkEdenPenetration)) {
  4225       _start_sampling = true;
  4226     } else {
  4227       _start_sampling = false;
  4229     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  4230     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
  4231     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  4233   CMSTokenSync x(true); // is cms thread
  4234   if (CMSPrecleaningEnabled) {
  4235     sample_eden();
  4236     _collectorState = AbortablePreclean;
  4237   } else {
  4238     _collectorState = FinalMarking;
  4240   verify_work_stacks_empty();
  4241   verify_overflow_empty();
  4244 // Try and schedule the remark such that young gen
  4245 // occupancy is CMSScheduleRemarkEdenPenetration %.
  4246 void CMSCollector::abortable_preclean() {
  4247   check_correct_thread_executing();
  4248   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
  4249   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
  4251   // If Eden's current occupancy is below this threshold,
  4252   // immediately schedule the remark; else preclean
  4253   // past the next scavenge in an effort to
  4254   // schedule the pause as described above. By choosing
  4255   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  4256   // we will never do an actual abortable preclean cycle.
  4257   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
  4258     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  4259     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
  4260     // We need more smarts in the abortable preclean
  4261     // loop below to deal with cases where allocation
  4262     // in young gen is very very slow, and our precleaning
  4263     // is running a losing race against a horde of
  4264     // mutators intent on flooding us with CMS updates
  4265     // (dirty cards).
  4266     // One, admittedly dumb, strategy is to give up
  4267     // after a certain number of abortable precleaning loops
  4268     // or after a certain maximum time. We want to make
  4269     // this smarter in the next iteration.
  4270     // XXX FIX ME!!! YSR
  4271     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
  4272     while (!(should_abort_preclean() ||
  4273              ConcurrentMarkSweepThread::should_terminate())) {
  4274       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
  4275       cumworkdone += workdone;
  4276       loops++;
  4277       // Voluntarily terminate abortable preclean phase if we have
  4278       // been at it for too long.
  4279       if ((CMSMaxAbortablePrecleanLoops != 0) &&
  4280           loops >= CMSMaxAbortablePrecleanLoops) {
  4281         if (PrintGCDetails) {
  4282           gclog_or_tty->print(" CMS: abort preclean due to loops ");
  4284         break;
  4286       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
  4287         if (PrintGCDetails) {
  4288           gclog_or_tty->print(" CMS: abort preclean due to time ");
  4290         break;
  4292       // If we are doing little work each iteration, we should
  4293       // take a short break.
  4294       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
  4295         // Sleep for some time, waiting for work to accumulate
  4296         stopTimer();
  4297         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
  4298         startTimer();
  4299         waited++;
  4302     if (PrintCMSStatistics > 0) {
  4303       gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
  4304                           loops, waited, cumworkdone);
  4307   CMSTokenSync x(true); // is cms thread
  4308   if (_collectorState != Idling) {
  4309     assert(_collectorState == AbortablePreclean,
  4310            "Spontaneous state transition?");
  4311     _collectorState = FinalMarking;
  4312   } // Else, a foreground collection completed this CMS cycle.
  4313   return;
  4316 // Respond to an Eden sampling opportunity
  4317 void CMSCollector::sample_eden() {
  4318   // Make sure a young gc cannot sneak in between our
  4319   // reading and recording of a sample.
  4320   assert(Thread::current()->is_ConcurrentGC_thread(),
  4321          "Only the cms thread may collect Eden samples");
  4322   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  4323          "Should collect samples while holding CMS token");
  4324   if (!_start_sampling) {
  4325     return;
  4327   if (_eden_chunk_array) {
  4328     if (_eden_chunk_index < _eden_chunk_capacity) {
  4329       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
  4330       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
  4331              "Unexpected state of Eden");
  4332       // We'd like to check that what we just sampled is an oop-start address;
  4333       // however, we cannot do that here since the object may not yet have been
  4334       // initialized. So we'll instead do the check when we _use_ this sample
  4335       // later.
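      // Commit the sample only if it is at least CMSSamplingGrain words
      // past the previously committed sample, so that successive samples
      // (and the rescan chunks later derived from them) are not too
      // closely spaced.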
  4336       if (_eden_chunk_index == 0 ||
  4337           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
  4338                          _eden_chunk_array[_eden_chunk_index-1])
  4339            >= CMSSamplingGrain)) {
  4340         _eden_chunk_index++;  // commit sample
  4344   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
  4345     size_t used = get_eden_used();
  4346     size_t capacity = get_eden_capacity();
  4347     assert(used <= capacity, "Unexpected state of Eden");
  4348     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
  4349       _abort_preclean = true;
  4355 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
  4356   assert(_collectorState == Precleaning ||
  4357          _collectorState == AbortablePreclean, "incorrect state");
  4358   ResourceMark rm;
  4359   HandleMark   hm;
  4360   // Do one pass of scrubbing the discovered reference lists
  4361   // to remove any reference objects with strongly-reachable
  4362   // referents.
  4363   if (clean_refs) {
  4364     ReferenceProcessor* rp = ref_processor();
  4365     CMSPrecleanRefsYieldClosure yield_cl(this);
  4366     assert(rp->span().equals(_span), "Spans should be equal");
  4367     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
  4368                                    &_markStack);
  4369     CMSDrainMarkingStackClosure complete_trace(this,
  4370                                   _span, &_markBitMap, &_markStack,
  4371                                   &keep_alive);
  4373     // We don't want this step to interfere with a young
  4374     // collection because we don't want to take CPU
  4375     // or memory bandwidth away from the young GC threads
  4376     // (which may be as many as there are CPUs).
  4377     // Note that we don't need to protect ourselves from
  4378     // interference with mutators because they can't
  4379     // manipulate the discovered reference lists nor affect
  4380     // the computed reachability of the referents, the
  4381     // only properties manipulated by the precleaning
  4382     // of these reference lists.
  4383     stopTimer();
  4384     CMSTokenSyncWithLocks x(true /* is cms thread */,
  4385                             bitMapLock());
  4386     startTimer();
  4387     sample_eden();
  4388     // The following will yield to allow foreground
  4389     // collection to proceed promptly. XXX YSR:
  4390     // The code in this method may need further
  4391     // tweaking for better performance and some restructuring
  4392     // for cleaner interfaces.
  4393     rp->preclean_discovered_references(
  4394           rp->is_alive_non_header(), &keep_alive, &complete_trace,
  4395           &yield_cl);
  4398   if (clean_survivor) {  // preclean the active survivor space(s)
  4399     assert(_young_gen->kind() == Generation::DefNew ||
  4400            _young_gen->kind() == Generation::ParNew ||
  4401            _young_gen->kind() == Generation::ASParNew,
  4402          "incorrect type for cast");
  4403     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
  4404     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
  4405                              &_markBitMap, &_modUnionTable,
  4406                              &_markStack, &_revisitStack,
  4407                              true /* precleaning phase */);
  4408     stopTimer();
  4409     CMSTokenSyncWithLocks ts(true /* is cms thread */,
  4410                              bitMapLock());
  4411     startTimer();
  4412     unsigned int before_count =
  4413       GenCollectedHeap::heap()->total_collections();
  4414     SurvivorSpacePrecleanClosure
  4415       sss_cl(this, _span, &_markBitMap, &_markStack,
  4416              &pam_cl, before_count, CMSYield);
  4417     dng->from()->object_iterate_careful(&sss_cl);
  4418     dng->to()->object_iterate_careful(&sss_cl);
  4420   MarkRefsIntoAndScanClosure
  4421     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
  4422              &_markStack, &_revisitStack, this, CMSYield,
  4423              true /* precleaning phase */);
  4424   // CAUTION: The following closure has persistent state that may need to
  4425   // be reset upon a decrease in the sequence of addresses it
  4426   // processes.
  4427   ScanMarkedObjectsAgainCarefullyClosure
  4428     smoac_cl(this, _span,
  4429       &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield);
  4431   // Preclean dirty cards in ModUnionTable and CardTable using
  4432   // appropriate convergence criterion;
  4433   // repeat CMSPrecleanIter times unless we find that
  4434   // we are losing.
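  // Concretely, the loop below stops early as soon as either
  //   curNumCards <= CMSPrecleanThreshold      (few dirty cards remain), or
  //   curNumCards * CMSPrecleanDenominator >
  //     lastNumCards * CMSPrecleanNumerator    (after the first iteration),
  // the latter meaning the number of dirty cards is no longer shrinking
  // fast enough for another precleaning pass to pay off.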
  4435   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
  4436   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
  4437          "Bad convergence multiplier");
  4438   assert(CMSPrecleanThreshold >= 100,
  4439          "Unreasonably low CMSPrecleanThreshold");
  4441   size_t numIter, cumNumCards, lastNumCards, curNumCards;
  4442   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
  4443        numIter < CMSPrecleanIter;
  4444        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
  4445     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
  4446     if (CMSPermGenPrecleaningEnabled) {
  4447       curNumCards  += preclean_mod_union_table(_permGen, &smoac_cl);
  4449     if (Verbose && PrintGCDetails) {
  4450       gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
  4452     // Either there are very few dirty cards, so re-mark
  4453     // pause will be small anyway, or our pre-cleaning isn't
  4454     // that much faster than the rate at which cards are being
  4455     // dirtied, so we might as well stop and re-mark since
  4456     // precleaning won't improve our re-mark time by much.
  4457     if (curNumCards <= CMSPrecleanThreshold ||
  4458         (numIter > 0 &&
  4459          (curNumCards * CMSPrecleanDenominator >
  4460          lastNumCards * CMSPrecleanNumerator))) {
  4461       numIter++;
  4462       cumNumCards += curNumCards;
  4463       break;
  4466   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  4467   if (CMSPermGenPrecleaningEnabled) {
  4468     curNumCards += preclean_card_table(_permGen, &smoac_cl);
  4470   cumNumCards += curNumCards;
  4471   if (PrintGCDetails && PrintCMSStatistics != 0) {
  4472     gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
  4473                   curNumCards, cumNumCards, numIter);
  4475   return cumNumCards;   // as a measure of useful work done
  4478 // PRECLEANING NOTES:
  4479 // Precleaning involves:
  4480 // . reading the bits of the modUnionTable and clearing the set bits.
  4481 // . For the cards corresponding to the set bits, we scan the
  4482 //   objects on those cards. This means we need the free_list_lock
  4483 //   so that we can safely iterate over the CMS space when scanning
  4484 //   for oops.
  4485 // . When we scan the objects, we'll be both reading and setting
  4486 //   marks in the marking bit map, so we'll need the marking bit map.
  4487 // . For protecting _collector_state transitions, we take the CGC_lock.
  4488 //   Note that any races in the reading of card table entries by the
  4489 //   CMS thread on the one hand and the clearing of those entries by the
  4490 //   VM thread or the setting of those entries by the mutator threads on the
  4491 //   other are quite benign. However, for efficiency it makes sense to keep
  4492 //   the VM thread from racing with the CMS thread while the latter is
  4493 //   transferring dirty card info to the modUnionTable. We therefore also use the
  4494 //   CGC_lock to protect the reading of the card table and the mod union
  4495 //   table by the CMS thread.
  4496 // . We run concurrently with mutator updates, so scanning
  4497 //   needs to be done carefully  -- we should not try to scan
  4498 //   potentially uninitialized objects.
  4499 //
  4500 // Locking strategy: While holding the CGC_lock, we scan over and
  4501 // reset a maximal dirty range of the mod union / card tables, then lock
  4502 // the free_list_lock and bitmap lock to do a full marking, then
  4503 // release these locks; and repeat the cycle. This allows for a
  4504 // certain amount of fairness in the sharing of these locks between
  4505 // the CMS collector on the one hand, and the VM thread and the
  4506 // mutators on the other.
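//
// In outline, each iteration of the loop in preclean_mod_union_table()
// below realizes one turn of that cycle roughly as:
//
//   { CMSTokenSync ts(true);            // CMS token (CGC_lock) only
//     dirtyRegion =
//       _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); }
//   { CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
//     gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); }
//
// preclean_card_table() follows the same pattern, taking its dirty
// ranges from the card table instead of the mod union table.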
  4508 // NOTE: preclean_mod_union_table() and preclean_card_table()
  4509 // further below are largely identical; if you need to modify
  4510 // one of these methods, please check the other method too.
  4512 size_t CMSCollector::preclean_mod_union_table(
  4513   ConcurrentMarkSweepGeneration* gen,
  4514   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  4515   verify_work_stacks_empty();
  4516   verify_overflow_empty();
  4518   // strategy: starting with the first card, accumulate contiguous
  4519   // ranges of dirty cards; clear these cards, then scan the region
  4520   // covered by these cards.
  4522   // Since all of the MUT is committed ahead, we can just use
  4523   // that, in case the generations expand while we are precleaning.
  4524   // It might also be fine to just use the committed part of the
  4525   // generation, but we might potentially miss cards when the
  4526   // generation is rapidly expanding while we are in the midst
  4527   // of precleaning.
  4528   HeapWord* startAddr = gen->reserved().start();
  4529   HeapWord* endAddr   = gen->reserved().end();
  4531   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
  4533   size_t numDirtyCards, cumNumDirtyCards;
  4534   HeapWord *nextAddr, *lastAddr;
  4535   for (cumNumDirtyCards = numDirtyCards = 0,
  4536        nextAddr = lastAddr = startAddr;
  4537        nextAddr < endAddr;
  4538        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
  4540     ResourceMark rm;
  4541     HandleMark   hm;
  4543     MemRegion dirtyRegion;
  4545       stopTimer();
  4546       CMSTokenSync ts(true);
  4547       startTimer();
  4548       sample_eden();
  4549       // Get dirty region starting at nextAddr (inclusive),
  4550       // simultaneously clearing it.
  4551       dirtyRegion =
  4552         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
  4553       assert(dirtyRegion.start() >= nextAddr,
  4554              "returned region inconsistent?");
  4556     // Remember where the next search should begin.
  4557     // The returned region (if non-empty) is a right open interval,
  4558     // so lastAddr is obtained from the right end of that
  4559     // interval.
  4560     lastAddr = dirtyRegion.end();
  4561     // Should do something more transparent and less hacky XXX
  4562     numDirtyCards =
  4563       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
  4565     // We'll scan the cards in the dirty region (with periodic
  4566     // yields for foreground GC as needed).
  4567     if (!dirtyRegion.is_empty()) {
  4568       assert(numDirtyCards > 0, "consistency check");
  4569       HeapWord* stop_point = NULL;
  4571         stopTimer();
  4572         CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
  4573                                  bitMapLock());
  4574         startTimer();
  4575         verify_work_stacks_empty();
  4576         verify_overflow_empty();
  4577         sample_eden();
  4578         stop_point =
  4579           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
  4581       if (stop_point != NULL) {
  4582         // The careful iteration stopped early either because it found an
  4583         // uninitialized object, or because we were in the midst of an
  4584         // "abortable preclean", which should now be aborted. Redirty
  4585         // the bits corresponding to the partially-scanned or unscanned
  4586         // cards. We'll either restart at the next block boundary or
  4587         // abort the preclean.
  4588         assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
  4589                (_collectorState == AbortablePreclean && should_abort_preclean()),
  4590                "Unparsable objects should only be in perm gen.");
  4592         stopTimer();
  4593         CMSTokenSyncWithLocks ts(true, bitMapLock());
  4594         startTimer();
  4595         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
  4596         if (should_abort_preclean()) {
  4597           break; // out of preclean loop
  4598         } else {
  4599           // Compute the next address at which preclean should pick up;
  4600           // might need bitMapLock in order to read P-bits.
  4601           lastAddr = next_card_start_after_block(stop_point);
  4604     } else {
  4605       assert(lastAddr == endAddr, "consistency check");
  4606       assert(numDirtyCards == 0, "consistency check");
  4607       break;
  4610   verify_work_stacks_empty();
  4611   verify_overflow_empty();
  4612   return cumNumDirtyCards;
  4615 // NOTE: preclean_mod_union_table() above and preclean_card_table()
  4616 // below are largely identical; if you need to modify
  4617 // one of these methods, please check the other method too.
  4619 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
  4620   ScanMarkedObjectsAgainCarefullyClosure* cl) {
  4621   // strategy: it's similar to preclean_mod_union_table() above, in that
  4622   // we accumulate contiguous ranges of dirty cards, mark these cards
  4623   // precleaned, then scan the region covered by these cards.
  4624   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
  4625   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
  4627   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
  4629   size_t numDirtyCards, cumNumDirtyCards;
  4630   HeapWord *lastAddr, *nextAddr;
  4632   for (cumNumDirtyCards = numDirtyCards = 0,
  4633        nextAddr = lastAddr = startAddr;
  4634        nextAddr < endAddr;
  4635        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
  4637     ResourceMark rm;
  4638     HandleMark   hm;
  4640     MemRegion dirtyRegion;
  4642       // See comments in "Precleaning notes" above on why we
  4643       // do this locking. XXX Could the locking overheads be
  4644       // too high when dirty cards are sparse? [I don't think so.]
  4645       stopTimer();
  4646       CMSTokenSync x(true); // is cms thread
  4647       startTimer();
  4648       sample_eden();
  4649       // Get and clear dirty region from card table
  4650       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_preclean(
  4651                                     MemRegion(nextAddr, endAddr));
  4652       assert(dirtyRegion.start() >= nextAddr,
  4653              "returned region inconsistent?");
  4655     lastAddr = dirtyRegion.end();
  4656     numDirtyCards =
  4657       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
  4659     if (!dirtyRegion.is_empty()) {
  4660       stopTimer();
  4661       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
  4662       startTimer();
  4663       sample_eden();
  4664       verify_work_stacks_empty();
  4665       verify_overflow_empty();
  4666       HeapWord* stop_point =
  4667         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
  4668       if (stop_point != NULL) {
  4669         // The careful iteration stopped early because it found an
  4670         // uninitialized object.  Redirty the bits corresponding to the
  4671         // partially-scanned or unscanned cards, and start again at the
  4672         // next block boundary.
  4673         assert(CMSPermGenPrecleaningEnabled ||
  4674                (_collectorState == AbortablePreclean && should_abort_preclean()),
  4675                "Unparsable objects should only be in perm gen.");
  4676         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
  4677         if (should_abort_preclean()) {
  4678           break; // out of preclean loop
  4679         } else {
  4680           // Compute the next address at which preclean should pick up.
  4681           lastAddr = next_card_start_after_block(stop_point);
  4684     } else {
  4685       break;
  4688   verify_work_stacks_empty();
  4689   verify_overflow_empty();
  4690   return cumNumDirtyCards;
  4693 void CMSCollector::checkpointRootsFinal(bool asynch,
  4694   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  4695   assert(_collectorState == FinalMarking, "incorrect state transition?");
  4696   check_correct_thread_executing();
  4697   // world is stopped at this checkpoint
  4698   assert(SafepointSynchronize::is_at_safepoint(),
  4699          "world should be stopped");
  4700   verify_work_stacks_empty();
  4701   verify_overflow_empty();
  4703   SpecializationStats::clear();
  4704   if (PrintGCDetails) {
  4705     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
  4706                         _young_gen->used() / K,
  4707                         _young_gen->capacity() / K);
  4709   if (asynch) {
  4710     if (CMSScavengeBeforeRemark) {
  4711       GenCollectedHeap* gch = GenCollectedHeap::heap();
  4712       // Temporarily set flag to false, GCH->do_collection will
  4713       // expect it to be false and set to true
  4714       FlagSetting fl(gch->_is_gc_active, false);
  4715       NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
  4716         PrintGCDetails && Verbose, true, gclog_or_tty);)
  4717       int level = _cmsGen->level() - 1;
  4718       if (level >= 0) {
  4719         gch->do_collection(true,        // full (i.e. force, see below)
  4720                            false,       // !clear_all_soft_refs
  4721                            0,           // size
  4722                            false,       // is_tlab
  4723                            level        // max_level
  4724                           );
  4727     FreelistLocker x(this);
  4728     MutexLockerEx y(bitMapLock(),
  4729                     Mutex::_no_safepoint_check_flag);
  4730     assert(!init_mark_was_synchronous, "but that's impossible!");
  4731     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
  4732   } else {
  4733     // already have all the locks
  4734     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
  4735                              init_mark_was_synchronous);
  4737   verify_work_stacks_empty();
  4738   verify_overflow_empty();
  4739   SpecializationStats::print();
  4742 void CMSCollector::checkpointRootsFinalWork(bool asynch,
  4743   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  4745   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
  4747   assert(haveFreelistLocks(), "must have free list locks");
  4748   assert_lock_strong(bitMapLock());
  4750   if (UseAdaptiveSizePolicy) {
  4751     size_policy()->checkpoint_roots_final_begin();
  4754   ResourceMark rm;
  4755   HandleMark   hm;
  4757   GenCollectedHeap* gch = GenCollectedHeap::heap();
  4759   if (should_unload_classes()) {
  4760     CodeCache::gc_prologue();
  4762   assert(haveFreelistLocks(), "must have free list locks");
  4763   assert_lock_strong(bitMapLock());
  4765   if (!init_mark_was_synchronous) {
  4766     // We might assume that we need not fill TLAB's when
  4767     // CMSScavengeBeforeRemark is set, because we may have just done
  4768     // a scavenge which would have filled all TLAB's -- and besides
  4769     // Eden would be empty. This however may not always be the case --
  4770     // for instance although we asked for a scavenge, it may not have
  4771     // happened because of a JNI critical section. We probably need
  4772     // a policy for deciding whether we can in that case wait until
  4773     // the critical section releases and then do the remark following
  4774     // the scavenge, and skip it here. In the absence of that policy,
  4775     // or of an indication of whether the scavenge did indeed occur,
  4776     // we cannot rely on TLAB's having been filled and must do
  4777     // so here just in case a scavenge did not happen.
  4778     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
  4779     // Update the saved marks which may affect the root scans.
  4780     gch->save_marks();
  4783       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
  4785       // Note on the role of the mod union table:
  4786       // Since the marker in "markFromRoots" marks concurrently with
  4787       // mutators, it is possible for some reachable objects not to have been
  4788       // scanned. For instance, an only reference to an object A was
  4789       // placed in object B after the marker scanned B. Unless B is rescanned,
  4790       // A would be collected. Such updates to references in marked objects
  4791       // are detected via the mod union table which is the set of all cards
  4792       // dirtied since the first checkpoint in this GC cycle and prior to
  4793       // the most recent young generation GC, minus those cleaned up by the
  4794       // concurrent precleaning.
  4795       if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) {
  4796         TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
  4797         do_remark_parallel();
  4798       } else {
  4799         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
  4800                     gclog_or_tty);
  4801         do_remark_non_parallel();
  4804   } else {
  4805     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
  4806     // The initial mark was stop-world, so there's no rescanning to
  4807     // do; go straight on to the next step below.
  4809   verify_work_stacks_empty();
  4810   verify_overflow_empty();
  4813     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
  4814     refProcessingWork(asynch, clear_all_soft_refs);
  4816   verify_work_stacks_empty();
  4817   verify_overflow_empty();
  4819   if (should_unload_classes()) {
  4820     CodeCache::gc_epilogue();
  4823   // If we encountered any (marking stack / work queue) overflow
  4824   // events during the current CMS cycle, take appropriate
  4825   // remedial measures, where possible, so as to try and avoid
  4826   // recurrence of that condition.
  4827   assert(_markStack.isEmpty(), "No grey objects");
  4828   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
  4829                      _ser_kac_ovflw;
  4830   if (ser_ovflw > 0) {
  4831     if (PrintCMSStatistics != 0) {
  4832       gclog_or_tty->print_cr("Marking stack overflow (benign) "
  4833         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
  4834         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
  4835         _ser_kac_ovflw);
  4837     _markStack.expand();
  4838     _ser_pmc_remark_ovflw = 0;
  4839     _ser_pmc_preclean_ovflw = 0;
  4840     _ser_kac_ovflw = 0;
  4842   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
  4843     if (PrintCMSStatistics != 0) {
  4844       gclog_or_tty->print_cr("Work queue overflow (benign) "
  4845         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
  4846         _par_pmc_remark_ovflw, _par_kac_ovflw);
  4848     _par_pmc_remark_ovflw = 0;
  4849     _par_kac_ovflw = 0;
  4851   if (PrintCMSStatistics != 0) {
  4852      if (_markStack._hit_limit > 0) {
  4853        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
  4854                               _markStack._hit_limit);
  4856      if (_markStack._failed_double > 0) {
  4857        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
  4858                               " current capacity "SIZE_FORMAT,
  4859                               _markStack._failed_double,
  4860                               _markStack.capacity());
  4863   _markStack._hit_limit = 0;
  4864   _markStack._failed_double = 0;
  4866   if ((VerifyAfterGC || VerifyDuringGC) &&
  4867       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  4868     verify_after_remark();
  4871   // Change under the freelistLocks.
  4872   _collectorState = Sweeping;
  4873   // Call isAllClear() under bitMapLock
  4874   assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
  4875     " final marking");
  4876   if (UseAdaptiveSizePolicy) {
  4877     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
  4881 // Parallel remark task
  4882 class CMSParRemarkTask: public AbstractGangTask {
  4883   CMSCollector* _collector;
  4884   WorkGang*     _workers;
  4885   int           _n_workers;
  4886   CompactibleFreeListSpace* _cms_space;
  4887   CompactibleFreeListSpace* _perm_space;
  4889   // The per-thread work queues, available here for stealing.
  4890   OopTaskQueueSet*       _task_queues;
  4891   ParallelTaskTerminator _term;
  4893  public:
  4894   CMSParRemarkTask(CMSCollector* collector,
  4895                    CompactibleFreeListSpace* cms_space,
  4896                    CompactibleFreeListSpace* perm_space,
  4897                    int n_workers, WorkGang* workers,
  4898                    OopTaskQueueSet* task_queues):
  4899     AbstractGangTask("Rescan roots and grey objects in parallel"),
  4900     _collector(collector),
  4901     _cms_space(cms_space), _perm_space(perm_space),
  4902     _n_workers(n_workers),
  4903     _workers(workers),
  4904     _task_queues(task_queues),
  4905     _term(workers->total_workers(), task_queues) { }
  4907   OopTaskQueueSet* task_queues() { return _task_queues; }
  4909   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
  4911   ParallelTaskTerminator* terminator() { return &_term; }
  4913   void work(int i);
  4915  private:
  4916   // Work method in support of parallel rescan ... of young gen spaces
  4917   void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
  4918                              ContiguousSpace* space,
  4919                              HeapWord** chunk_array, size_t chunk_top);
  4921   // ... of  dirty cards in old space
  4922   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
  4923                                   Par_MarkRefsIntoAndScanClosure* cl);
  4925   // ... work stealing for the above
  4926   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
  4927 };
  4929 void CMSParRemarkTask::work(int i) {
  4930   elapsedTimer _timer;
  4931   ResourceMark rm;
  4932   HandleMark   hm;
  4934   // ---------- rescan from roots --------------
  4935   _timer.start();
  4936   GenCollectedHeap* gch = GenCollectedHeap::heap();
  4937   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
  4938     _collector->_span, _collector->ref_processor(),
  4939     &(_collector->_markBitMap),
  4940     work_queue(i), &(_collector->_revisitStack));
  4942   // Rescan young gen roots first since these are likely
  4943   // coarsely partitioned and may, on that account, constitute
  4944   // the critical path; thus, it's best to start off that
  4945   // work first.
  4946   // ---------- young gen roots --------------
  4948     DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
  4949     EdenSpace* eden_space = dng->eden();
  4950     ContiguousSpace* from_space = dng->from();
  4951     ContiguousSpace* to_space   = dng->to();
  4953     HeapWord** eca = _collector->_eden_chunk_array;
  4954     size_t     ect = _collector->_eden_chunk_index;
  4955     HeapWord** sca = _collector->_survivor_chunk_array;
  4956     size_t     sct = _collector->_survivor_chunk_index;
  4958     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
  4959     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
  4961     do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
  4962     do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
  4963     do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
  4965     _timer.stop();
  4966     if (PrintCMSStatistics != 0) {
  4967       gclog_or_tty->print_cr(
  4968         "Finished young gen rescan work in %dth thread: %3.3f sec",
  4969         i, _timer.seconds());
  4973   // ---------- remaining roots --------------
  4974   _timer.reset();
  4975   _timer.start();
  4976   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
  4977                                 false,     // yg was scanned above
  4978                                 true,      // collecting perm gen
  4979                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
  4980                                 NULL, &par_mrias_cl);
  4981   _timer.stop();
  4982   if (PrintCMSStatistics != 0) {
  4983     gclog_or_tty->print_cr(
  4984       "Finished remaining root rescan work in %dth thread: %3.3f sec",
  4985       i, _timer.seconds());
  4988   // ---------- rescan dirty cards ------------
  4989   _timer.reset();
  4990   _timer.start();
  4992   // Do the rescan tasks for each of the two spaces
  4993   // (cms_space and perm_space) in turn.
  4994   do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
  4995   do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
  4996   _timer.stop();
  4997   if (PrintCMSStatistics != 0) {
  4998     gclog_or_tty->print_cr(
  4999       "Finished dirty card rescan work in %dth thread: %3.3f sec",
  5000       i, _timer.seconds());
  5003   // ---------- steal work from other threads ...
  5004   // ---------- ... and drain overflow list.
  5005   _timer.reset();
  5006   _timer.start();
  5007   do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
  5008   _timer.stop();
  5009   if (PrintCMSStatistics != 0) {
  5010     gclog_or_tty->print_cr(
  5011       "Finished work stealing in %dth thread: %3.3f sec",
  5012       i, _timer.seconds());
  5016 void
  5017 CMSParRemarkTask::do_young_space_rescan(int i,
  5018   Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
  5019   HeapWord** chunk_array, size_t chunk_top) {
  5020   // Until all tasks completed:
  5021   // . claim an unclaimed task
  5022   // . compute region boundaries corresponding to task claimed
  5023   //   using chunk_array
  5024   // . par_oop_iterate(cl) over that region
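  //
  // chunk_array[0..chunk_top) holds sampled top-of-space addresses, so
  // the claimed regions are [bottom, chunk_array[0]),
  // [chunk_array[k-1], chunk_array[k]) for 0 < k < chunk_top, and
  // [chunk_array[chunk_top-1], top()); when no samples were taken
  // (chunk_top == 0) a single task covers [bottom, top()).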
  5026   ResourceMark rm;
  5027   HandleMark   hm;
  5029   SequentialSubTasksDone* pst = space->par_seq_tasks();
  5030   assert(pst->valid(), "Uninitialized use?");
  5032   int nth_task = 0;
  5033   int n_tasks  = pst->n_tasks();
  5035   HeapWord *start, *end;
  5036   while (!pst->is_task_claimed(/* reference */ nth_task)) {
  5037     // We claimed task # nth_task; compute its boundaries.
  5038     if (chunk_top == 0) {  // no samples were taken
  5039       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
  5040       start = space->bottom();
  5041       end   = space->top();
  5042     } else if (nth_task == 0) {
  5043       start = space->bottom();
  5044       end   = chunk_array[nth_task];
  5045     } else if (nth_task < (jint)chunk_top) {
  5046       assert(nth_task >= 1, "Control point invariant");
  5047       start = chunk_array[nth_task - 1];
  5048       end   = chunk_array[nth_task];
  5049     } else {
  5050       assert(nth_task == (jint)chunk_top, "Control point invariant");
  5051       start = chunk_array[chunk_top - 1];
  5052       end   = space->top();
  5054     MemRegion mr(start, end);
  5055     // Verify that mr is in space
  5056     assert(mr.is_empty() || space->used_region().contains(mr),
  5057            "Should be in space");
  5058     // Verify that "start" is an object boundary
  5059     assert(mr.is_empty() || oop(mr.start())->is_oop(),
  5060            "Should be an oop");
  5061     space->par_oop_iterate(mr, cl);
  5063   pst->all_tasks_completed();
  5066 void
  5067 CMSParRemarkTask::do_dirty_card_rescan_tasks(
  5068   CompactibleFreeListSpace* sp, int i,
  5069   Par_MarkRefsIntoAndScanClosure* cl) {
  5070   // Until all tasks completed:
  5071   // . claim an unclaimed task
  5072   // . compute region boundaries corresponding to task claimed
  5073   // . transfer dirty bits ct->mut for that region
  5074   // . apply rescanclosure to dirty mut bits for that region
  5076   ResourceMark rm;
  5077   HandleMark   hm;
  5079   OopTaskQueue* work_q = work_queue(i);
  5080   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
  5081   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
  5082   // CAUTION: This closure has state that persists across calls to
  5083   // the work method dirty_range_iterate_clear() in that it has
  5084   // embedded in it a (subtype of) UpwardsObjectClosure. The
  5085   // use of that state in the embedded UpwardsObjectClosure instance
  5086   // assumes that the cards are always iterated (even if in parallel
  5087   // by several threads) in monotonically increasing order per each
  5088   // thread. This is true of the implementation below which picks
  5089   // card ranges (chunks) in monotonically increasing order globally
  5090   // and, a-fortiori, in monotonically increasing order per thread
  5091   // (the latter order being a subsequence of the former).
  5092   // If the work code below is ever reorganized into a more chaotic
  5093   // work-partitioning form than the current "sequential tasks"
  5094   // paradigm, the use of that persistent state will have to be
  5095   // revisited and modified appropriately. See also related
  5096   // bug 4756801 work on which should examine this code to make
  5097   // sure that the changes there do not run counter to the
  5098   // assumptions made here and necessary for correctness and
  5099   // efficiency. Note also that this code might yield inefficient
  5100   // behaviour in the case of very large objects that span one or
  5101   // more work chunks. Such objects would potentially be scanned
  5102   // several times redundantly. Work on 4756801 should try and
  5103   // address that performance anomaly if at all possible. XXX
  5104   MemRegion  full_span  = _collector->_span;
  5105   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
  5106   CMSMarkStack* rs = &(_collector->_revisitStack);   // shared
  5107   MarkFromDirtyCardsClosure
  5108     greyRescanClosure(_collector, full_span, // entire span of interest
  5109                       sp, bm, work_q, rs, cl);
  5111   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
  5112   assert(pst->valid(), "Uninitialized use?");
  5113   int nth_task = 0;
  5114   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
  5115   MemRegion span = sp->used_region();
  5116   HeapWord* start_addr = span.start();
  5117   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
  5118                                            alignment);
  5119   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
  5120   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
  5121          start_addr, "Check alignment");
  5122   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
  5123          chunk_size, "Check alignment");
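         // Illustrative sketch of the partitioning done in the loop below,
         // using the names defined above: task k rescans the chunk
         //   MemRegion(start_addr + k * chunk_size,
         //             MIN2(start_addr + (k + 1) * chunk_size, end_addr));
         // both endpoints are multiples of the card/MUT-word alignment, so
         // no two workers ever touch the same mod union table word.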
  5125   while (!pst->is_task_claimed(/* reference */ nth_task)) {
  5126     // Having claimed the nth_task, compute corresponding mem-region,
  5127     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
  5128     // The alignment restriction ensures that we do not need any
  5129     // synchronization with other gang-workers while setting or
  5130     // clearing bits in this chunk of the MUT.
  5131     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
  5132                                     start_addr + (nth_task+1)*chunk_size);
  5133     // The last chunk's end might be way beyond end of the
  5134     // used region. In that case pull back appropriately.
  5135     if (this_span.end() > end_addr) {
  5136       this_span.set_end(end_addr);
  5137       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
  5139     // Iterate over the dirty cards covering this chunk, marking them
  5140     // precleaned, and setting the corresponding bits in the mod union
  5141     // table. Since we have been careful to partition at Card and MUT-word
  5142     // boundaries no synchronization is needed between parallel threads.
  5143     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
  5144                                                  &modUnionClosure);
  5146     // Having transferred these marks into the modUnionTable,
  5147     // rescan the marked objects on the dirty cards in the modUnionTable.
  5148     // Even if this is a synchronous collection, the initial marking
  5149     // may have been done during an asynchronous collection so there
  5150     // may be dirty bits in the mod-union table.
  5151     _collector->_modUnionTable.dirty_range_iterate_clear(
  5152                   this_span, &greyRescanClosure);
  5153     _collector->_modUnionTable.verifyNoOneBitsInRange(
  5154                                  this_span.start(),
  5155                                  this_span.end());
  5157   pst->all_tasks_completed();  // declare that i am done
  5160 // . see if we can share work_queues with ParNew? XXX
  5161 void
  5162 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
  5163                                 int* seed) {
  5164   OopTaskQueue* work_q = work_queue(i);
  5165   NOT_PRODUCT(int num_steals = 0;)
  5166   oop obj_to_scan;
  5167   CMSBitMap* bm = &(_collector->_markBitMap);
  5168   size_t num_from_overflow_list =
  5169            MIN2((size_t)work_q->max_elems()/4,
  5170                 (size_t)ParGCDesiredObjsFromOverflowList);
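         // A rough outline of the loop below: (1) drain any work left on our
         // local queue, (2) try to refill the local queue from the global
         // overflow list, (3) failing that, try to steal from a peer's queue,
         // and (4) when no work can be found anywhere, offer termination.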
  5172   while (true) {
  5173     // Completely finish any left over work from (an) earlier round(s)
  5174     cl->trim_queue(0);
  5175     // Now check if there's any work in the overflow list
  5176     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
  5177                                                 work_q)) {
  5178       // found something in global overflow list;
  5179       // not yet ready to go stealing work from others.
  5180       // We'd like to assert(work_q->size() != 0, ...)
  5181       // because we just took work from the overflow list,
  5182       // but of course we can't since all of that could have
  5183       // been already stolen from us.
  5184       // "He giveth and He taketh away."
  5185       continue;
  5187     // Verify that we have no work before we resort to stealing
  5188     assert(work_q->size() == 0, "Have work, shouldn't steal");
  5189     // Try to steal from other queues that have work
  5190     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
  5191       NOT_PRODUCT(num_steals++;)
  5192       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
  5193       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
  5194       // Do scanning work
  5195       obj_to_scan->oop_iterate(cl);
  5196       // Loop around, finish this work, and try to steal some more
  5197     } else if (terminator()->offer_termination()) {
  5198         break;  // nirvana from the infinite cycle
  5201   NOT_PRODUCT(
  5202     if (PrintCMSStatistics != 0) {
  5203       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
  5206   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
  5207          "Else our work is not yet done");
  5210 // Return a thread-local PLAB recording array, as appropriate.
  5211 void* CMSCollector::get_data_recorder(int thr_num) {
  5212   if (_survivor_plab_array != NULL &&
  5213       (CMSPLABRecordAlways ||
  5214        (_collectorState > Marking && _collectorState < FinalMarking))) {
  5215     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
  5216     ChunkArray* ca = &_survivor_plab_array[thr_num];
  5217     ca->reset();   // clear it so that fresh data is recorded
  5218     return (void*) ca;
  5219   } else {
  5220     return NULL;
  5224 // Reset all the thread-local PLAB recording arrays
  5225 void CMSCollector::reset_survivor_plab_arrays() {
  5226   for (uint i = 0; i < ParallelGCThreads; i++) {
  5227     _survivor_plab_array[i].reset();
  5231 // Merge the per-thread plab arrays into the global survivor chunk
  5232 // array which will provide the partitioning of the survivor space
  5233 // for CMS rescan.
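       // In effect this is a k-way merge (k = ParallelGCThreads) of the
       // per-thread PLAB start addresses, each array being expected (by the
       // min-selection merge below) to be in increasing address order; the
       // ASSERT block at the end checks that the merged result is sorted.
       // Purely for illustration: with two threads whose recorded starts are
       // {a0, a2} and {a1, a3}, a0 < a1 < a2 < a3, the merged
       // _survivor_chunk_array becomes {a0, a1, a2, a3}; these addresses
       // then bound the from-space rescan tasks set up further below.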
  5234 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) {
  5235   assert(_survivor_plab_array  != NULL, "Error");
  5236   assert(_survivor_chunk_array != NULL, "Error");
  5237   assert(_collectorState == FinalMarking, "Error");
  5238   for (uint j = 0; j < ParallelGCThreads; j++) {
  5239     _cursor[j] = 0;
  5241   HeapWord* top = surv->top();
  5242   size_t i;
  5243   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
  5244     HeapWord* min_val = top;          // Higher than any PLAB address
  5245     uint      min_tid = 0;            // position of min_val this round
  5246     for (uint j = 0; j < ParallelGCThreads; j++) {
  5247       ChunkArray* cur_sca = &_survivor_plab_array[j];
  5248       if (_cursor[j] == cur_sca->end()) {
  5249         continue;
  5251       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
  5252       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
  5253       assert(surv->used_region().contains(cur_val), "Out of bounds value");
  5254       if (cur_val < min_val) {
  5255         min_tid = j;
  5256         min_val = cur_val;
  5257       } else {
  5258         assert(cur_val < top, "All recorded addresses should be less");
  5261     // At this point min_val and min_tid are respectively
  5262     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
  5263     // and the thread (j) that witnesses that address.
  5264     // We record this address in the _survivor_chunk_array[i]
  5265     // and increment _cursor[min_tid] prior to the next round i.
  5266     if (min_val == top) {
  5267       break;
  5269     _survivor_chunk_array[i] = min_val;
  5270     _cursor[min_tid]++;
  5272   // We are all done; record the size of the _survivor_chunk_array
  5273   _survivor_chunk_index = i; // exclusive: [0, i)
  5274   if (PrintCMSStatistics > 0) {
  5275     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
  5277   // Verify that we used up all the recorded entries
  5278   #ifdef ASSERT
  5279     size_t total = 0;
  5280     for (uint j = 0; j < ParallelGCThreads; j++) {
  5281       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
  5282       total += _cursor[j];
  5284     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
  5285     // Check that the merged array is in sorted order
  5286     if (total > 0) {
  5287       for (size_t i = 0; i < total - 1; i++) {
  5288         if (PrintCMSStatistics > 0) {
  5289           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
  5290                               i, _survivor_chunk_array[i]);
  5292         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
  5293                "Not sorted");
  5296   #endif // ASSERT
  5299 // Set up the space's par_seq_tasks structure for work claiming
  5300 // for parallel rescan of young gen.
  5301 // See ParRescanTask where this is currently used.
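       // In rough outline (a summary of the code below): the eden space is
       // partitioned using the chunk boundaries sampled during the
       // (abortable) preclean phase, the from space using the merged
       // survivor PLAB array, and the to space is always set up as a
       // single task.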
  5302 void
  5303 CMSCollector::
  5304 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
  5305   assert(n_threads > 0, "Unexpected n_threads argument");
  5306   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
  5308   // Eden space
  5310     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
  5311     assert(!pst->valid(), "Clobbering existing data?");
  5312     // Each valid entry in [0, _eden_chunk_index) represents a task.
  5313     size_t n_tasks = _eden_chunk_index + 1;
  5314     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
  5315     pst->set_par_threads(n_threads);
  5316     pst->set_n_tasks((int)n_tasks);
  5319   // Merge the survivor plab arrays into _survivor_chunk_array
  5320   if (_survivor_plab_array != NULL) {
  5321     merge_survivor_plab_arrays(dng->from());
  5322   } else {
  5323     assert(_survivor_chunk_index == 0, "Error");
  5326   // To space
  5328     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
  5329     assert(!pst->valid(), "Clobbering existing data?");
  5330     pst->set_par_threads(n_threads);
  5331     pst->set_n_tasks(1);
  5332     assert(pst->valid(), "Error");
  5335   // From space
  5337     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
  5338     assert(!pst->valid(), "Clobbering existing data?");
  5339     size_t n_tasks = _survivor_chunk_index + 1;
  5340     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
  5341     pst->set_par_threads(n_threads);
  5342     pst->set_n_tasks((int)n_tasks);
  5343     assert(pst->valid(), "Error");
  5347 // Parallel version of remark
  5348 void CMSCollector::do_remark_parallel() {
  5349   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5350   WorkGang* workers = gch->workers();
  5351   assert(workers != NULL, "Need parallel worker threads.");
  5352   int n_workers = workers->total_workers();
  5353   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  5354   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
  5356   CMSParRemarkTask tsk(this,
  5357     cms_space, perm_space,
  5358     n_workers, workers, task_queues());
  5360   // Set up for parallel process_strong_roots work.
  5361   gch->set_par_threads(n_workers);
  5362   gch->change_strong_roots_parity();
  5363   // We won't be iterating over the cards in the card table updating
  5364   // the younger_gen cards, so we shouldn't call the following, else
  5365   // the verification code as well as the subsequent younger_refs_iterate
  5366   // code would get confused. XXX
  5367   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
  5369   // The young gen rescan work will not be done as part of
  5370   // process_strong_roots (which currently doesn't know how to
  5371   // parallelize such a scan), but rather will be broken up into
  5372   // a set of parallel tasks (via the sampling that the [abortable]
  5373   // preclean phase did of EdenSpace, plus the [two] tasks of
  5374   // scanning the [two] survivor spaces). Further fine-grain
  5375   // parallelization of the scanning of the survivor spaces
  5376   // themselves, and of precleaning of the younger gen itself
  5377   // is deferred to the future.
  5378   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
  5380   // The dirty card rescan work is broken up into a "sequence"
  5381   // of parallel tasks (per constituent space) that are dynamically
  5382   // claimed by the parallel threads.
  5383   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
  5384   perm_space->initialize_sequential_subtasks_for_rescan(n_workers);
  5386   // It turns out that even when we're using 1 thread, doing the work in a
  5387   // separate thread causes wide variance in run times.  We can't help this
  5388   // in the multi-threaded case, but we special-case n=1 here to get
  5389   // repeatable measurements of the 1-thread overhead of the parallel code.
  5390   if (n_workers > 1) {
  5391     // Make refs discovery MT-safe
  5392     ReferenceProcessorMTMutator mt(ref_processor(), true);
  5393     workers->run_task(&tsk);
  5394   } else {
  5395     tsk.work(0);
  5397   gch->set_par_threads(0);  // 0 ==> non-parallel.
  5398   // restore, single-threaded for now, any preserved marks
  5399   // as a result of work_q overflow
  5400   restore_preserved_marks_if_any();
  5403 // Non-parallel version of remark
  5404 void CMSCollector::do_remark_non_parallel() {
  5405   ResourceMark rm;
  5406   HandleMark   hm;
  5407   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5408   MarkRefsIntoAndScanClosure
  5409     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
  5410              &_markStack, &_revisitStack, this,
  5411              false /* should_yield */, false /* not precleaning */);
  5412   MarkFromDirtyCardsClosure
  5413     markFromDirtyCardsClosure(this, _span,
  5414                               NULL,  // space is set further below
  5415                               &_markBitMap, &_markStack, &_revisitStack,
  5416                               &mrias_cl);
  5418     TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
  5419     // Iterate over the dirty cards, marking them precleaned, and
  5420     // setting the corresponding bits in the mod union table.
  5422       ModUnionClosure modUnionClosure(&_modUnionTable);
  5423       _ct->ct_bs()->dirty_card_iterate(
  5424                       _cmsGen->used_region(),
  5425                       &modUnionClosure);
  5426       _ct->ct_bs()->dirty_card_iterate(
  5427                       _permGen->used_region(),
  5428                       &modUnionClosure);
  5430     // Having transferred these marks into the modUnionTable, we just need
  5431     // to rescan the marked objects on the dirty cards in the modUnionTable.
  5432     // The initial marking may have been done during an asynchronous
  5433     // collection so there may be dirty bits in the mod-union table.
  5434     const int alignment =
  5435       CardTableModRefBS::card_size * BitsPerWord;
  5437       // ... First handle dirty cards in CMS gen
  5438       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
  5439       MemRegion ur = _cmsGen->used_region();
  5440       HeapWord* lb = ur.start();
  5441       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
  5442       MemRegion cms_span(lb, ub);
  5443       _modUnionTable.dirty_range_iterate_clear(cms_span,
  5444                                                &markFromDirtyCardsClosure);
  5445       verify_work_stacks_empty();
  5446       if (PrintCMSStatistics != 0) {
  5447         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
  5448           markFromDirtyCardsClosure.num_dirty_cards());
  5452       // .. and then repeat for dirty cards in perm gen
  5453       markFromDirtyCardsClosure.set_space(_permGen->cmsSpace());
  5454       MemRegion ur = _permGen->used_region();
  5455       HeapWord* lb = ur.start();
  5456       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
  5457       MemRegion perm_span(lb, ub);
  5458       _modUnionTable.dirty_range_iterate_clear(perm_span,
  5459                                                &markFromDirtyCardsClosure);
  5460       verify_work_stacks_empty();
  5461       if (PrintCMSStatistics != 0) {
  5462         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ",
  5463           markFromDirtyCardsClosure.num_dirty_cards());
  5467   if (VerifyDuringGC &&
  5468       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  5469     HandleMark hm;  // Discard invalid handles created during verification
  5470     Universe::verify(true);
  5473     TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
  5475     verify_work_stacks_empty();
  5477     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  5478     gch->gen_process_strong_roots(_cmsGen->level(),
  5479                                   true,  // younger gens as roots
  5480                                   true,  // collecting perm gen
  5481                                   SharedHeap::ScanningOption(roots_scanning_options()),
  5482                                   NULL, &mrias_cl);
  5484   verify_work_stacks_empty();
  5485   // Restore evacuated mark words, if any, used for overflow list links
  5486   if (!CMSOverflowEarlyRestoration) {
  5487     restore_preserved_marks_if_any();
  5489   verify_overflow_empty();
  5492 ////////////////////////////////////////////////////////
  5493 // Parallel Reference Processing Task Proxy Class
  5494 ////////////////////////////////////////////////////////
  5495 class CMSRefProcTaskProxy: public AbstractGangTask {
  5496   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  5497   CMSCollector*          _collector;
  5498   CMSBitMap*             _mark_bit_map;
  5499   MemRegion              _span;
  5500   OopTaskQueueSet*       _task_queues;
  5501   ParallelTaskTerminator _term;
  5502   ProcessTask&           _task;
  5504 public:
  5505   CMSRefProcTaskProxy(ProcessTask&     task,
  5506                       CMSCollector*    collector,
  5507                       const MemRegion& span,
  5508                       CMSBitMap*       mark_bit_map,
  5509                       int              total_workers,
  5510                       OopTaskQueueSet* task_queues):
  5511     AbstractGangTask("Process referents by policy in parallel"),
  5512     _task(task),
  5513     _collector(collector), _span(span), _mark_bit_map(mark_bit_map),
  5514     _task_queues(task_queues),
  5515     _term(total_workers, task_queues)
  5516     { }
  5518   OopTaskQueueSet* task_queues() { return _task_queues; }
  5520   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
  5522   ParallelTaskTerminator* terminator() { return &_term; }
  5524   void do_work_steal(int i,
  5525                      CMSParDrainMarkingStackClosure* drain,
  5526                      CMSParKeepAliveClosure* keep_alive,
  5527                      int* seed);
  5529   virtual void work(int i);
  5530 };
  5532 void CMSRefProcTaskProxy::work(int i) {
  5533   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
  5534                                         _mark_bit_map, work_queue(i));
  5535   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
  5536                                                  _mark_bit_map, work_queue(i));
  5537   CMSIsAliveClosure is_alive_closure(_mark_bit_map);
  5538   _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
  5539   if (_task.marks_oops_alive()) {
  5540     do_work_steal(i, &par_drain_stack, &par_keep_alive,
  5541                   _collector->hash_seed(i));
  5543   assert(work_queue(i)->size() == 0, "work_queue should be empty");
  5544   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
  5547 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
  5548   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  5549   EnqueueTask& _task;
  5551 public:
  5552   CMSRefEnqueueTaskProxy(EnqueueTask& task)
  5553     : AbstractGangTask("Enqueue reference objects in parallel"),
  5554       _task(task)
  5555   { }
  5557   virtual void work(int i)
  5559     _task.work(i);
  5561 };
  5563 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
  5564   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
  5565    _collector(collector),
  5566    _span(span),
  5567    _bit_map(bit_map),
  5568    _work_queue(work_queue),
  5569    _mark_and_push(collector, span, bit_map, work_queue),
  5570    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
  5571                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
  5572 { }
  5574 // . see if we can share work_queues with ParNew? XXX
  5575 void CMSRefProcTaskProxy::do_work_steal(int i,
  5576   CMSParDrainMarkingStackClosure* drain,
  5577   CMSParKeepAliveClosure* keep_alive,
  5578   int* seed) {
  5579   OopTaskQueue* work_q = work_queue(i);
  5580   NOT_PRODUCT(int num_steals = 0;)
  5581   oop obj_to_scan;
  5582   size_t num_from_overflow_list =
  5583            MIN2((size_t)work_q->max_elems()/4,
  5584                 (size_t)ParGCDesiredObjsFromOverflowList);
  5586   while (true) {
  5587     // Completely finish any left over work from (an) earlier round(s)
  5588     drain->trim_queue(0);
  5589     // Now check if there's any work in the overflow list
  5590     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
  5591                                                 work_q)) {
  5592       // Found something in global overflow list;
  5593       // not yet ready to go stealing work from others.
  5594       // We'd like to assert(work_q->size() != 0, ...)
  5595       // because we just took work from the overflow list,
  5596       // but of course we can't, since all of that might have
  5597       // been already stolen from us.
  5598       continue;
  5600     // Verify that we have no work before we resort to stealing
  5601     assert(work_q->size() == 0, "Have work, shouldn't steal");
  5602     // Try to steal from other queues that have work
  5603     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
  5604       NOT_PRODUCT(num_steals++;)
  5605       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
  5606       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
  5607       // Do scanning work
  5608       obj_to_scan->oop_iterate(keep_alive);
  5609       // Loop around, finish this work, and try to steal some more
  5610     } else if (terminator()->offer_termination()) {
  5611       break;  // nirvana from the infinite cycle
  5614   NOT_PRODUCT(
  5615     if (PrintCMSStatistics != 0) {
  5616       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
  5621 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
  5623   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5624   WorkGang* workers = gch->workers();
  5625   assert(workers != NULL, "Need parallel worker threads.");
  5626   int n_workers = workers->total_workers();
  5627   CMSRefProcTaskProxy rp_task(task, &_collector,
  5628                               _collector.ref_processor()->span(),
  5629                               _collector.markBitMap(),
  5630                               n_workers, _collector.task_queues());
  5631   workers->run_task(&rp_task);
  5634 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
  5637   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5638   WorkGang* workers = gch->workers();
  5639   assert(workers != NULL, "Need parallel worker threads.");
  5640   CMSRefEnqueueTaskProxy enq_task(task);
  5641   workers->run_task(&enq_task);
  5644 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
  5646   ResourceMark rm;
  5647   HandleMark   hm;
  5648   ReferencePolicy* soft_ref_policy;
  5650   assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
  5651   // Process weak references.
  5652   if (clear_all_soft_refs) {
  5653     soft_ref_policy = new AlwaysClearPolicy();
  5654   } else {
  5655 #ifdef COMPILER2
  5656     soft_ref_policy = new LRUMaxHeapPolicy();
  5657 #else
  5658     soft_ref_policy = new LRUCurrentHeapPolicy();
  5659 #endif // COMPILER2
  5661   verify_work_stacks_empty();
  5663   ReferenceProcessor* rp = ref_processor();
  5664   assert(rp->span().equals(_span), "Spans should be equal");
  5665   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
  5666                                           &_markStack);
  5667   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
  5668                                 _span, &_markBitMap, &_markStack,
  5669                                 &cmsKeepAliveClosure);
  5671     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
  5672     if (rp->processing_is_mt()) {
  5673       CMSRefProcTaskExecutor task_executor(*this);
  5674       rp->process_discovered_references(soft_ref_policy,
  5675                                         &_is_alive_closure,
  5676                                         &cmsKeepAliveClosure,
  5677                                         &cmsDrainMarkingStackClosure,
  5678                                         &task_executor);
  5679     } else {
  5680       rp->process_discovered_references(soft_ref_policy,
  5681                                         &_is_alive_closure,
  5682                                         &cmsKeepAliveClosure,
  5683                                         &cmsDrainMarkingStackClosure,
  5684                                         NULL);
  5686     verify_work_stacks_empty();
  5689   if (should_unload_classes()) {
  5691       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
  5693       // Follow SystemDictionary roots and unload classes
  5694       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
  5696       // Follow CodeCache roots and unload any methods marked for unloading
  5697       CodeCache::do_unloading(&_is_alive_closure,
  5698                               &cmsKeepAliveClosure,
  5699                               purged_class);
  5701       cmsDrainMarkingStackClosure.do_void();
  5702       verify_work_stacks_empty();
  5704       // Update subklass/sibling/implementor links in KlassKlass descendants
  5705       assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
  5706       oop k;
  5707       while ((k = _revisitStack.pop()) != NULL) {
  5708         ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
  5709                        &_is_alive_closure,
  5710                        &cmsKeepAliveClosure);
  5712       assert(!ClassUnloading ||
  5713              (_markStack.isEmpty() && overflow_list_is_empty()),
  5714              "Should not have found new reachable objects");
  5715       assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
  5716       cmsDrainMarkingStackClosure.do_void();
  5717       verify_work_stacks_empty();
  5721       TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
  5722       // Now clean up stale oops in SymbolTable and StringTable
  5723       SymbolTable::unlink(&_is_alive_closure);
  5724       StringTable::unlink(&_is_alive_closure);
  5728   verify_work_stacks_empty();
  5729   // Restore any preserved marks as a result of mark stack or
  5730   // work queue overflow
  5731   restore_preserved_marks_if_any();  // done single-threaded for now
  5733   rp->set_enqueuing_is_done(true);
  5734   if (rp->processing_is_mt()) {
  5735     CMSRefProcTaskExecutor task_executor(*this);
  5736     rp->enqueue_discovered_references(&task_executor);
  5737   } else {
  5738     rp->enqueue_discovered_references(NULL);
  5740   rp->verify_no_references_recorded();
  5741   assert(!rp->discovery_enabled(), "should have been disabled");
  5743   // JVMTI object tagging is based on JNI weak refs. If any of these
  5744   // refs were cleared then JVMTI needs to update its maps and
  5745   // maybe post ObjectFrees to agents.
  5746   JvmtiExport::cms_ref_processing_epilogue();
  5749 #ifndef PRODUCT
  5750 void CMSCollector::check_correct_thread_executing() {
  5751   Thread* t = Thread::current();
  5752   // Only the VM thread or the CMS thread should be here.
  5753   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
  5754          "Unexpected thread type");
  5755   // If this is the vm thread, the foreground process
  5756   // should not be waiting.  Note that _foregroundGCIsActive is
  5757   // true while the foreground collector is waiting.
  5758   if (_foregroundGCShouldWait) {
  5759     // We cannot be the VM thread
  5760     assert(t->is_ConcurrentGC_thread(),
  5761            "Should be CMS thread");
  5762   } else {
  5763     // We can be the CMS thread only if we are in a stop-world
  5764     // phase of CMS collection.
  5765     if (t->is_ConcurrentGC_thread()) {
  5766       assert(_collectorState == InitialMarking ||
  5767              _collectorState == FinalMarking,
  5768              "Should be a stop-world phase");
  5769       // The CMS thread should be holding the CMS_token.
  5770       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  5771              "Potential interference with concurrently "
  5772              "executing VM thread");
  5776 #endif
  5778 void CMSCollector::sweep(bool asynch) {
  5779   assert(_collectorState == Sweeping, "just checking");
  5780   check_correct_thread_executing();
  5781   verify_work_stacks_empty();
  5782   verify_overflow_empty();
  5783   incrementSweepCount();
  5784   _sweep_timer.stop();
  5785   _sweep_estimate.sample(_sweep_timer.seconds());
  5786   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
  5788   // PermGen verification support: If perm gen sweeping is disabled in
  5789   // this cycle, we preserve the perm gen object "deadness" information
  5790   // in the perm_gen_verify_bit_map. In order to do that we traverse
  5791   // all blocks in perm gen and mark all dead objects.
  5792   if (verifying() && !should_unload_classes()) {
  5793     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
  5794            "Should have already been allocated");
  5795     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
  5796                                markBitMap(), perm_gen_verify_bit_map());
  5797     if (asynch) {
  5798       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
  5799                                bitMapLock());
  5800       _permGen->cmsSpace()->blk_iterate(&mdo);
  5801     } else {
  5802       // In the case of synchronous sweep, we already have
  5803       // the requisite locks/tokens.
  5804       _permGen->cmsSpace()->blk_iterate(&mdo);
  5808   if (asynch) {
  5809     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  5810     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
  5811     // First sweep the old gen then the perm gen
  5813       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
  5814                                bitMapLock());
  5815       sweepWork(_cmsGen, asynch);
  5818     // Now repeat for perm gen
  5819     if (should_unload_classes()) {
  5820       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
  5821                              bitMapLock());
  5822       sweepWork(_permGen, asynch);
  5825     // Update Universe::_heap_*_at_gc figures.
  5826     // We need all the free list locks to make the abstract state
  5827     // transition from Sweeping to Resetting. See detailed note
  5828     // further below.
  5830       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
  5831                                _permGen->freelistLock());
  5832       // Update heap occupancy information which is used as
  5833       // input to soft ref clearing policy at the next gc.
  5834       Universe::update_heap_info_at_gc();
  5835       _collectorState = Resizing;
  5837   } else {
  5838     // already have needed locks
  5839     sweepWork(_cmsGen,  asynch);
  5841     if (should_unload_classes()) {
  5842       sweepWork(_permGen, asynch);
  5844     // Update heap occupancy information which is used as
  5845     // input to soft ref clearing policy at the next gc.
  5846     Universe::update_heap_info_at_gc();
  5847     _collectorState = Resizing;
  5849   verify_work_stacks_empty();
  5850   verify_overflow_empty();
  5852   _sweep_timer.reset();
  5853   _sweep_timer.start();
  5855   update_time_of_last_gc(os::javaTimeMillis());
  5857   // NOTE on abstract state transitions:
  5858   // Mutators allocate-live and/or mark the mod-union table dirty
  5859   // based on the state of the collection.  The former is done in
  5860   // the interval [Marking, Sweeping] and the latter in the interval
  5861   // [Marking, Sweeping).  Thus the transitions into the Marking state
  5862   // and out of the Sweeping state must be synchronously visible
  5863   // globally to the mutators.
  5864   // The transition into the Marking state happens with the world
  5865   // stopped so the mutators will globally see it.  Sweeping is
  5866   // done asynchronously by the background collector so the transition
  5867   // from the Sweeping state to the Resizing state must be done
  5868   // under the freelistLock (as is the check for whether to
  5869   // allocate-live and whether to dirty the mod-union table).
  5870   assert(_collectorState == Resizing, "Change of collector state to"
  5871     " Resizing must be done under the freelistLocks (plural)");
  5873   // Now that sweeping has been completed, if the GCH's
  5874   // incremental_collection_will_fail flag is set, clear it,
  5875   // thus inviting a younger gen collection to promote into
  5876   // this generation. If such a promotion may still fail,
  5877   // the flag will be set again when a young collection is
  5878   // attempted.
  5879   // I think the incremental_collection_will_fail flag's use
  5880   // is specific to a 2-generation collection policy, so I'll
  5881   // assert that that's the configuration we are operating within.
  5882   // The use of the flag can and should be generalized appropriately
  5883   // in the future to deal with a general n-generation system.
  5885   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5886   assert(gch->collector_policy()->is_two_generation_policy(),
  5887          "Resetting of incremental_collection_will_fail flag"
  5888          " may be incorrect otherwise");
  5889   gch->clear_incremental_collection_will_fail();
  5890   gch->update_full_collections_completed(_collection_count_start);
  5893 // FIX ME!!! Looks like this belongs in CFLSpace, with
  5894 // CMSGen merely delegating to it.
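       // Descriptively: find the largest free chunk in the free list
       // dictionary (falling back to the end of the space if the dictionary
       // is empty) and remember an address roughly 99.9% of the way from the
       // bottom of the space toward that chunk (less MinChunkSize); callers
       // use isNearLargestChunk(), below, to test addresses against this
       // threshold.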
  5895 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
  5896   double nearLargestPercent = 0.999;
  5897   HeapWord*  minAddr        = _cmsSpace->bottom();
  5898   HeapWord*  largestAddr    =
  5899     (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
  5900   if (largestAddr == 0) {
  5901     // The dictionary appears to be empty.  In this case
  5902     // try to coalesce at the end of the heap.
  5903     largestAddr = _cmsSpace->end();
  5905   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
  5906   size_t nearLargestOffset =
  5907     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  5908   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
  5911 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
  5912   return addr >= _cmsSpace->nearLargestChunk();
  5915 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
  5916   return _cmsSpace->find_chunk_at_end();
  5919 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
  5920                                                     bool full) {
  5921   // The next lower level has been collected.  Gather any statistics
  5922   // that are of interest at this point.
  5923   if (!full && (current_level + 1) == level()) {
  5924     // Gather statistics on the young generation collection.
  5925     collector()->stats().record_gc0_end(used());
  5929 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
  5930   GenCollectedHeap* gch = GenCollectedHeap::heap();
  5931   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
  5932     "Wrong type of heap");
  5933   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
  5934     gch->gen_policy()->size_policy();
  5935   assert(sp->is_gc_cms_adaptive_size_policy(),
  5936     "Wrong type of size policy");
  5937   return sp;
  5940 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  5941   if (PrintGCDetails && Verbose) {
  5942     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
  5944   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
  5945   _debug_collection_type =
  5946     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
  5947   if (PrintGCDetails && Verbose) {
  5948     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
  5952 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
  5953   bool asynch) {
  5954   // We iterate over the space(s) underlying this generation,
  5955   // checking the mark bit map to see if the bits corresponding
  5956   // to specific blocks are marked or not. Blocks that are
  5957   // marked are live and are not swept up. All remaining blocks
  5958   // are swept up, with coalescing on-the-fly as we sweep up
  5959   // contiguous free and/or garbage blocks:
  5960   // We need to ensure that the sweeper synchronizes with allocators
  5961   // and stop-the-world collectors. In particular, the following
  5962   // locks are used:
  5963   // . CMS token: if this is held, a stop the world collection cannot occur
  5964   // . freelistLock: if this is held no allocation can occur from this
  5965   //                 generation by another thread
  5966   // . bitMapLock: if this is held, no other thread can access or update
  5967   //               the mark bit map
  5969   // Note that we need to hold the freelistLock if we use
  5970   // block iterate below; else the iterator might go awry if
  5971   // a mutator (or promotion) causes block contents to change
  5972   // (for instance if the allocator divvies up a block).
  5973   // If we hold the free list lock, for all practical purposes
  5974   // young generation GC's can't occur (they'll usually need to
  5975   // promote), so we might as well prevent all young generation
  5976   // GC's while we do a sweeping step. For the same reason, we might
  5977   // as well take the bit map lock for the entire duration
  5979   // check that we hold the requisite locks
  5980   assert(have_cms_token(), "Should hold cms token");
  5981   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
  5982          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
  5983         "Should possess CMS token to sweep");
  5984   assert_lock_strong(gen->freelistLock());
  5985   assert_lock_strong(bitMapLock());
  5987   assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
  5988   gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
  5989                                       _sweep_estimate.padded_average());
  5990   gen->setNearLargestChunk();
  5993     SweepClosure sweepClosure(this, gen, &_markBitMap,
  5994                             CMSYield && asynch);
  5995     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
  5996     // We need to free-up/coalesce garbage/blocks from a
  5997     // co-terminal free run. This is done in the SweepClosure
  5998     // destructor; so, do not remove this scope, else the
  5999     // end-of-sweep-census below will be off by a little bit.
  6001   gen->cmsSpace()->sweep_completed();
  6002   gen->cmsSpace()->endSweepFLCensus(sweepCount());
  6003   if (should_unload_classes()) {                // unloaded classes this cycle,
  6004     _concurrent_cycles_since_last_unload = 0;   // ... reset count
  6005   } else {                                      // did not unload classes,
  6006     _concurrent_cycles_since_last_unload++;     // ... increment count
  6010 // Reset CMS data structures (for now just the marking bit map)
  6011 // preparatory for the next cycle.
  6012 void CMSCollector::reset(bool asynch) {
  6013   GenCollectedHeap* gch = GenCollectedHeap::heap();
  6014   CMSAdaptiveSizePolicy* sp = size_policy();
  6015   AdaptiveSizePolicyOutput(sp, gch->total_collections());
  6016   if (asynch) {
  6017     CMSTokenSyncWithLocks ts(true, bitMapLock());
  6019     // If the state is not "Resetting", the foreground  thread
  6020     // has done a collection and the resetting.
  6021     if (_collectorState != Resetting) {
  6022       assert(_collectorState == Idling, "The state should only change"
  6023         " because the foreground collector has finished the collection");
  6024       return;
  6027     // Clear the mark bitmap (no grey objects to start with)
  6028     // for the next cycle.
  6029     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  6030     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
  6032     HeapWord* curAddr = _markBitMap.startWord();
  6033     while (curAddr < _markBitMap.endWord()) {
  6034       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
  6035       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
  6036       _markBitMap.clear_large_range(chunk);
  6037       if (ConcurrentMarkSweepThread::should_yield() &&
  6038           !foregroundGCIsActive() &&
  6039           CMSYield) {
  6040         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6041                "CMS thread should hold CMS token");
  6042         assert_lock_strong(bitMapLock());
  6043         bitMapLock()->unlock();
  6044         ConcurrentMarkSweepThread::desynchronize(true);
  6045         ConcurrentMarkSweepThread::acknowledge_yield_request();
  6046         stopTimer();
  6047         if (PrintCMSStatistics != 0) {
  6048           incrementYields();
  6050         icms_wait();
  6052         // See the comment in coordinator_yield()
  6053         for (unsigned i = 0; i < CMSYieldSleepCount &&
  6054                          ConcurrentMarkSweepThread::should_yield() &&
  6055                          !CMSCollector::foregroundGCIsActive(); ++i) {
  6056           os::sleep(Thread::current(), 1, false);
  6057           ConcurrentMarkSweepThread::acknowledge_yield_request();
  6060         ConcurrentMarkSweepThread::synchronize(true);
  6061         bitMapLock()->lock_without_safepoint_check();
  6062         startTimer();
  6064       curAddr = chunk.end();
  6066     _collectorState = Idling;
  6067   } else {
  6068     // already have the lock
  6069     assert(_collectorState == Resetting, "just checking");
  6070     assert_lock_strong(bitMapLock());
  6071     _markBitMap.clear_all();
  6072     _collectorState = Idling;
  6075   // Stop incremental mode after a cycle completes, so that any future cycles
  6076   // are triggered by allocation.
  6077   stop_icms();
  6079   NOT_PRODUCT(
  6080     if (RotateCMSCollectionTypes) {
  6081       _cmsGen->rotate_debug_collection_type();
  6086 void CMSCollector::do_CMS_operation(CMS_op_type op) {
  6087   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  6088   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  6089   TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
  6090   TraceCollectorStats tcs(counters());
  6092   switch (op) {
  6093     case CMS_op_checkpointRootsInitial: {
  6094       checkpointRootsInitial(true);       // asynch
  6095       if (PrintGC) {
  6096         _cmsGen->printOccupancy("initial-mark");
  6098       break;
  6100     case CMS_op_checkpointRootsFinal: {
  6101       checkpointRootsFinal(true,    // asynch
  6102                            false,   // !clear_all_soft_refs
  6103                            false);  // !init_mark_was_synchronous
  6104       if (PrintGC) {
  6105         _cmsGen->printOccupancy("remark");
  6107       break;
  6109     default:
  6110       fatal("No such CMS_op");
  6114 #ifndef PRODUCT
  6115 size_t const CMSCollector::skip_header_HeapWords() {
  6116   return FreeChunk::header_size();
  6119 // Try and collect here conditions that should hold when
  6120 // CMS thread is exiting. The idea is that the foreground GC
  6121 // thread should not be blocked if it wants to terminate
  6122 // the CMS thread and yet continue to run the VM for a while
  6123 // after that.
  6124 void CMSCollector::verify_ok_to_terminate() const {
  6125   assert(Thread::current()->is_ConcurrentGC_thread(),
  6126          "should be called by CMS thread");
  6127   assert(!_foregroundGCShouldWait, "should be false");
  6128   // We could check here that all the various low-level locks
  6129   // are not held by the CMS thread, but that is overkill; see
  6130   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
  6131   // is checked.
  6133 #endif
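       // A note on the "Printezis marks" convention assumed by the two
       // methods below (a description of what the code computes, not a
       // specification): when a block's size cannot (yet) be determined from
       // its header, its extent is recorded in the mark bit map by setting
       // the bit at addr + 1 (in addition to the mark at addr) and a bit at
       // the last word of the block; the size is then recovered as the
       // distance from addr to one past the next marked word at or after
       // addr + 2.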
  6135 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  6136   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
  6137          "missing Printezis mark?");
  6138   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  6139   size_t size = pointer_delta(nextOneAddr + 1, addr);
  6140   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  6141          "alignment problem");
  6142   assert(size >= 3, "Necessary for Printezis marks to work");
  6143   return size;
  6146 // A variant of the above (block_size_using_printezis_bits()) except
  6147 // that we return 0 if the P-bits are not yet set.
  6148 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
  6149   if (_markBitMap.isMarked(addr)) {
  6150     assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
  6151     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  6152     size_t size = pointer_delta(nextOneAddr + 1, addr);
  6153     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  6154            "alignment problem");
  6155     assert(size >= 3, "Necessary for Printezis marks to work");
  6156     return size;
  6157   } else {
  6158     assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
  6159     return 0;
  6163 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
  6164   size_t sz = 0;
  6165   oop p = (oop)addr;
  6166   if (p->klass() != NULL && p->is_parsable()) {
  6167     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
  6168   } else {
  6169     sz = block_size_using_printezis_bits(addr);
  6171   assert(sz > 0, "size must be nonzero");
  6172   HeapWord* next_block = addr + sz;
  6173   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
  6174                                              CardTableModRefBS::card_size);
  6175   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
  6176          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
  6177          "must be different cards");
  6178   return next_card;
  6182 // CMS Bit Map Wrapper /////////////////////////////////////////
  6184 // Construct a CMS bit map infrastructure, but don't create the
  6185 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
  6186 // further below.
  6187 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
  6188   _bm(NULL,0),
  6189   _shifter(shifter),
  6190   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
  6192   _bmStartWord = 0;
  6193   _bmWordSize  = 0;
  6196 bool CMSBitMap::allocate(MemRegion mr) {
  6197   _bmStartWord = mr.start();
  6198   _bmWordSize  = mr.word_size();
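         // Sizing note (descriptive): one bit is kept per (1 << _shifter)
         // HeapWords of the covered region, so the backing store needs about
         // _bmWordSize >> (_shifter + LogBitsPerByte) bytes; the "+ 1" below
         // covers any remainder lost to the shift.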
  6199   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
  6200                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  6201   if (!brs.is_reserved()) {
  6202     warning("CMS bit map allocation failure");
  6203     return false;
  6205   // For now we'll just commit all of the bit map up front.
  6206   // Later on we'll try to be more parsimonious with swap.
  6207   if (!_virtual_space.initialize(brs, brs.size())) {
  6208     warning("CMS bit map backing store failure");
  6209     return false;
  6211   assert(_virtual_space.committed_size() == brs.size(),
  6212          "didn't reserve backing store for all of CMS bit map?");
  6213   _bm.set_map((uintptr_t*)_virtual_space.low());
  6214   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
  6215          _bmWordSize, "inconsistency in bit map sizing");
  6216   _bm.set_size(_bmWordSize >> _shifter);
  6218   // bm.clear(); // can we rely on getting zero'd memory? verify below
  6219   assert(isAllClear(),
  6220          "Expected zero'd memory from ReservedSpace constructor");
  6221   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
  6222          "consistency check");
  6223   return true;
  6226 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
  6227   HeapWord *next_addr, *end_addr, *last_addr;
  6228   assert_locked();
  6229   assert(covers(mr), "out-of-range error");
  6230   // XXX assert that start and end are appropriately aligned
  6231   for (next_addr = mr.start(), end_addr = mr.end();
  6232        next_addr < end_addr; next_addr = last_addr) {
  6233     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
  6234     last_addr = dirty_region.end();
  6235     if (!dirty_region.is_empty()) {
  6236       cl->do_MemRegion(dirty_region);
  6237     } else {
  6238       assert(last_addr == end_addr, "program logic");
  6239       return;
  6244 #ifndef PRODUCT
  6245 void CMSBitMap::assert_locked() const {
  6246   CMSLockVerifier::assert_locked(lock());
  6249 bool CMSBitMap::covers(MemRegion mr) const {
  6250   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  6251   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
  6252          "size inconsistency");
  6253   return (mr.start() >= _bmStartWord) &&
  6254          (mr.end()   <= endWord());
  6257 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  6258     return (start >= _bmStartWord && (start + size) <= endWord());
  6261 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
  6262   // verify that there are no 1 bits in the interval [left, right)
  6263   FalseBitMapClosure falseBitMapClosure;
  6264   iterate(&falseBitMapClosure, left, right);
  6267 void CMSBitMap::region_invariant(MemRegion mr)
  6269   assert_locked();
  6270   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  6271   assert(!mr.is_empty(), "unexpected empty region");
  6272   assert(covers(mr), "mr should be covered by bit map");
  6273   // convert address range into offset range
  6274   size_t start_ofs = heapWordToOffset(mr.start());
  6275   // Make sure that end() is appropriately aligned
  6276   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
  6277                         (1 << (_shifter+LogHeapWordSize))),
  6278          "Misaligned mr.end()");
  6279   size_t end_ofs   = heapWordToOffset(mr.end());
  6280   assert(end_ofs > start_ofs, "Should mark at least one bit");
  6283 #endif
  6285 bool CMSMarkStack::allocate(size_t size) {
  6286   // allocate a stack of the requisite depth
  6287   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
  6288                    size * sizeof(oop)));
  6289   if (!rs.is_reserved()) {
  6290     warning("CMSMarkStack allocation failure");
  6291     return false;
  6293   if (!_virtual_space.initialize(rs, rs.size())) {
  6294     warning("CMSMarkStack backing store failure");
  6295     return false;
  6297   assert(_virtual_space.committed_size() == rs.size(),
  6298          "didn't reserve backing store for all of CMS stack?");
  6299   _base = (oop*)(_virtual_space.low());
  6300   _index = 0;
  6301   _capacity = size;
  6302   NOT_PRODUCT(_max_depth = 0);
  6303   return true;
  6306 // XXX FIX ME !!! In the MT case we come in here holding a
  6307 // leaf lock. For printing we need to take a further lock
  6308 // which has lower rank. We need to recalibrate the two
  6309 // lock-ranks involved in order to be able to print the
  6310 // messages below. (Or defer the printing to the caller.
  6311 // For now we take the expedient path of just disabling the
  6312 // messages for the problematic case.)
  6313 void CMSMarkStack::expand() {
  6314   assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
  6315   if (_capacity == CMSMarkStackSizeMax) {
  6316     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
  6317       // We print a warning message only once per CMS cycle.
  6318       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
  6320     return;
  6322   // Double capacity if possible
  6323   size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
  6324   // Do not give up existing stack until we have managed to
  6325   // get the double capacity that we desired.
  6326   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
  6327                    new_capacity * sizeof(oop)));
  6328   if (rs.is_reserved()) {
  6329     // Release the backing store associated with old stack
  6330     _virtual_space.release();
  6331     // Reinitialize virtual space for new stack
  6332     if (!_virtual_space.initialize(rs, rs.size())) {
  6333       fatal("Not enough swap for expanded marking stack");
  6335     _base = (oop*)(_virtual_space.low());
  6336     _index = 0;
  6337     _capacity = new_capacity;
  6338   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
  6339     // Failed to double capacity, continue;
  6340     // we print a detail message only once per CMS cycle.
  6341     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
  6342             SIZE_FORMAT"K",
  6343             _capacity / K, new_capacity / K);
  6348 // Closures
  6349 // XXX: there seems to be a lot of code  duplication here;
  6350 // should refactor and consolidate common code.
  6352 // This closure is used to mark refs into the CMS generation in
  6353 // the CMS bit map. Called at the first checkpoint. This closure
  6354 // assumes that we do not need to re-mark dirty cards; if the CMS
  6355 // generation on which this is used is not an oldest (modulo perm gen)
  6356 // generation then this will lose younger_gen cards!
  6358 MarkRefsIntoClosure::MarkRefsIntoClosure(
  6359   MemRegion span, CMSBitMap* bitMap, bool should_do_nmethods):
  6360     _span(span),
  6361     _bitMap(bitMap),
  6362     _should_do_nmethods(should_do_nmethods)
  6364     assert(_ref_processor == NULL, "deliberately left NULL");
  6365     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
  6368 void MarkRefsIntoClosure::do_oop(oop obj) {
  6369   // if p points into _span, then mark corresponding bit in _markBitMap
  6370   assert(obj->is_oop(), "expected an oop");
  6371   HeapWord* addr = (HeapWord*)obj;
  6372   if (_span.contains(addr)) {
  6373     // this should be made more efficient
  6374     _bitMap->mark(addr);
  6378 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  6379 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  6381 // A variant of the above, used for CMS marking verification.
  6382 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  6383   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  6384   bool should_do_nmethods):
  6385     _span(span),
  6386     _verification_bm(verification_bm),
  6387     _cms_bm(cms_bm),
  6388     _should_do_nmethods(should_do_nmethods) {
  6389     assert(_ref_processor == NULL, "deliberately left NULL");
  6390     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
  6393 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  6394   // if obj points into _span, then mark the corresponding bit in _verification_bm
  6395   assert(obj->is_oop(), "expected an oop");
  6396   HeapWord* addr = (HeapWord*)obj;
  6397   if (_span.contains(addr)) {
  6398     _verification_bm->mark(addr);
  6399     if (!_cms_bm->isMarked(addr)) {
  6400       oop(addr)->print();
  6401       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
  6402       fatal("... aborting");
  6407 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  6408 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  6410 //////////////////////////////////////////////////
  6411 // MarkRefsIntoAndScanClosure
  6412 //////////////////////////////////////////////////
  6414 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
  6415                                                        ReferenceProcessor* rp,
  6416                                                        CMSBitMap* bit_map,
  6417                                                        CMSBitMap* mod_union_table,
  6418                                                        CMSMarkStack*  mark_stack,
  6419                                                        CMSMarkStack*  revisit_stack,
  6420                                                        CMSCollector* collector,
  6421                                                        bool should_yield,
  6422                                                        bool concurrent_precleaning):
  6423   _collector(collector),
  6424   _span(span),
  6425   _bit_map(bit_map),
  6426   _mark_stack(mark_stack),
  6427   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
  6428                       mark_stack, revisit_stack, concurrent_precleaning),
  6429   _yield(should_yield),
  6430   _concurrent_precleaning(concurrent_precleaning),
  6431   _freelistLock(NULL)
  6433   _ref_processor = rp;
  6434   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  6437 // This closure is used to mark refs into the CMS generation at the
  6438 // second (final) checkpoint, and to scan and transitively follow
  6439 // the unmarked oops. It is also used during the concurrent precleaning
  6440 // phase while scanning objects on dirty cards in the CMS generation.
  6441 // The marks are made in the marking bit map and the marking stack is
  6442 // used for keeping the (newly) grey objects during the scan.
  6443 // The parallel version (Par_...) appears further below.
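       // In the grey/black terms used above: an object is grey once its bit
       // is set in the marking bit map but its interior oops have not yet
       // been traced; popping it from the marking stack and applying
       // _pushAndMarkClosure to each of its fields is what blackens it,
       // possibly greying further objects in _span.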
  6444 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  6445   if (obj != NULL) {
  6446     assert(obj->is_oop(), "expected an oop");
  6447     HeapWord* addr = (HeapWord*)obj;
  6448     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
  6449     assert(_collector->overflow_list_is_empty(),
  6450            "overflow list should be empty");
  6451     if (_span.contains(addr) &&
  6452         !_bit_map->isMarked(addr)) {
  6453       // mark bit map (object is now grey)
  6454       _bit_map->mark(addr);
  6455       // push on marking stack (stack should be empty), and drain the
  6456       // stack by applying this closure to the oops in the oops popped
  6457       // from the stack (i.e. blacken the grey objects)
  6458       bool res = _mark_stack->push(obj);
  6459       assert(res, "Should have space to push on empty stack");
  6460       do {
  6461         oop new_oop = _mark_stack->pop();
  6462         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
  6463         assert(new_oop->is_parsable(), "Found unparsable oop");
  6464         assert(_bit_map->isMarked((HeapWord*)new_oop),
  6465                "only grey objects on this stack");
  6466         // iterate over the oops in this oop, marking and pushing
  6467         // the ones in CMS heap (i.e. in _span).
  6468         new_oop->oop_iterate(&_pushAndMarkClosure);
  6469         // check if it's time to yield
  6470         do_yield_check();
  6471       } while (!_mark_stack->isEmpty() ||
  6472                (!_concurrent_precleaning && take_from_overflow_list()));
  6473         // if marking stack is empty, and we are not doing this
  6474         // during precleaning, then check the overflow list
  6476     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
  6477     assert(_collector->overflow_list_is_empty(),
  6478            "overflow list was drained above");
  6479     // We could restore evacuated mark words, if any, used for
  6480     // overflow list links here because the overflow list is
  6481     // provably empty here. That would reduce the maximum
  6482     // size requirements for preserved_{oop,mark}_stack.
  6483     // But we'll just postpone it until we are all done
  6484     // so we can just stream through.
  6485     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
  6486       _collector->restore_preserved_marks_if_any();
  6487       assert(_collector->no_preserved_marks(), "No preserved marks");
  6489     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
  6490            "All preserved marks should have been restored above");
  6494 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  6495 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
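       // A note on the yield protocol implemented by do_yield_work() below
       // (and by the similar do_yield_work() variants later in this file):
       // the closure first releases the lock(s) it holds (here the bit map
       // lock and the free list lock), relinquishes the CMS token via
       // desynchronize(), sleeps in 1 ms increments -- at most
       // CMSYieldSleepCount times -- while a yield is still requested and no
       // foreground GC is active, and then re-takes the token and re-acquires
       // the lock(s) without a safepoint check before restarting the timer.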
  6497 void MarkRefsIntoAndScanClosure::do_yield_work() {
  6498   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6499          "CMS thread should hold CMS token");
  6500   assert_lock_strong(_freelistLock);
  6501   assert_lock_strong(_bit_map->lock());
  6502   // relinquish the free_list_lock and bitMapLock()
  6503   _bit_map->lock()->unlock();
  6504   _freelistLock->unlock();
  6505   ConcurrentMarkSweepThread::desynchronize(true);
  6506   ConcurrentMarkSweepThread::acknowledge_yield_request();
  6507   _collector->stopTimer();
  6508   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  6509   if (PrintCMSStatistics != 0) {
  6510     _collector->incrementYields();
  6512   _collector->icms_wait();
  6514   // See the comment in coordinator_yield()
  6515   for (unsigned i = 0;
  6516        i < CMSYieldSleepCount &&
  6517        ConcurrentMarkSweepThread::should_yield() &&
  6518        !CMSCollector::foregroundGCIsActive();
  6519        ++i) {
  6520     os::sleep(Thread::current(), 1, false);
  6521     ConcurrentMarkSweepThread::acknowledge_yield_request();
  6524   ConcurrentMarkSweepThread::synchronize(true);
  6525   _freelistLock->lock_without_safepoint_check();
  6526   _bit_map->lock()->lock_without_safepoint_check();
  6527   _collector->startTimer();
  6530 ///////////////////////////////////////////////////////////
  6531 // Par_MarkRefsIntoAndScanClosure: a parallel version of
  6532 //                                 MarkRefsIntoAndScanClosure
  6533 ///////////////////////////////////////////////////////////
  6534 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
  6535   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  6536   CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack*  revisit_stack):
  6537   _span(span),
  6538   _bit_map(bit_map),
  6539   _work_queue(work_queue),
  6540   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
  6541                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
  6542   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
  6543                           revisit_stack)
  6545   _ref_processor = rp;
  6546   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
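         // Illustrative arithmetic for _low_water_mark above (example values,
         // not necessarily the defaults): with work_queue->max_elems() == 16K
         // and CMSWorkQueueDrainThreshold * ParallelGCThreads == 10 * 4 == 40,
         // the low water mark is MIN2(4096u, 40u) == 40, so the trim_queue()
         // call in do_oop() below drains the queue back down to about 40
         // entries after each push.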
  6549 // This closure is used to mark refs into the CMS generation at the
  6550 // second (final) checkpoint, and to scan and transitively follow
  6551 // the unmarked oops. The marks are made in the marking bit map and
  6552 // the work_queue is used for keeping the (newly) grey objects during
  6553 // the scan phase whence they are also available for stealing by parallel
  6554 // threads. Since the marking bit map is shared, updates are
  6555 // synchronized (via CAS).
  6556 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  6557   if (obj != NULL) {
  6558     // Ignore mark word because this could be an already marked oop
  6559     // that may be chained at the end of the overflow list.
  6560     assert(obj->is_oop(), "expected an oop");
  6561     HeapWord* addr = (HeapWord*)obj;
  6562     if (_span.contains(addr) &&
  6563         !_bit_map->isMarked(addr)) {
  6564       // mark bit map (object will become grey):
  6565       // It is possible for several threads to be
  6566       // trying to "claim" this object concurrently;
  6567       // the unique thread that succeeds in marking the
  6568       // object first will do the subsequent push on
  6569       // to the work queue (or overflow list).
  6570       if (_bit_map->par_mark(addr)) {
  6571         // push on work_queue (which may not be empty), and trim the
  6572         // queue to an appropriate length by applying this closure to
  6573         // the oops in the oops popped from the stack (i.e. blacken the
  6574         // grey objects)
  6575         bool res = _work_queue->push(obj);
  6576         assert(res, "Low water mark should be less than capacity?");
  6577         trim_queue(_low_water_mark);
  6578       } // Else, another thread claimed the object
  6583 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  6584 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  6586 // This closure is used to rescan the marked objects on the dirty cards
  6587 // in the mod union table and the card table proper.
  6588 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  6589   oop p, MemRegion mr) {
  6591   size_t size = 0;
  6592   HeapWord* addr = (HeapWord*)p;
  6593   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  6594   assert(_span.contains(addr), "we are scanning the CMS generation");
  6595   // check if it's time to yield
  6596   if (do_yield_check()) {
  6597     // We yielded for some foreground stop-world work,
  6598     // and we have been asked to abort this ongoing preclean cycle.
  6599     return 0;
  6601   if (_bitMap->isMarked(addr)) {
  6602     // it's marked; is it potentially uninitialized?
  6603     if (p->klass() != NULL) {
  6604       if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
  6605         // Signal precleaning to redirty the card since
  6606         // the klass pointer is already installed.
  6607         assert(size == 0, "Initial value");
  6608       } else {
  6609         assert(p->is_parsable(), "must be parsable.");
  6610         // an initialized object; ignore mark word in verification below
  6611         // since we are running concurrent with mutators
  6612         assert(p->is_oop(true), "should be an oop");
  6613         if (p->is_objArray()) {
  6614           // objArrays are precisely marked; restrict scanning
  6615           // to dirty cards only.
  6616           size = p->oop_iterate(_scanningClosure, mr);
  6617           assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  6618                  "adjustObjectSize should be the identity for array sizes, "
  6619                  "which are necessarily larger than minimum object size of "
  6620                  "two heap words");
  6621         } else {
  6622           // A non-array may have been imprecisely marked; we need
  6623           // to scan the object in its entirety.
  6624           size = CompactibleFreeListSpace::adjustObjectSize(
  6625                    p->oop_iterate(_scanningClosure));
  6627         #ifdef DEBUG
  6628           size_t direct_size =
  6629             CompactibleFreeListSpace::adjustObjectSize(p->size());
  6630           assert(size == direct_size, "Inconsistency in size");
  6631           assert(size >= 3, "Necessary for Printezis marks to work");
  6632           if (!_bitMap->isMarked(addr+1)) {
  6633             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
  6634           } else {
  6635             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
  6636             assert(_bitMap->isMarked(addr+size-1),
  6637                    "inconsistent Printezis mark");
  6639         #endif // DEBUG
  6641     } else {
  6642       // an uninitialized object
  6643       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
  6644       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
  6645       size = pointer_delta(nextOneAddr + 1, addr);
  6646       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  6647              "alignment problem");
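             // Worked example (illustrative addresses only): the Printezis
             // bits sit at addr and addr+1, and a third bit marks the last
             // word of the block at addr+size-1; so if
             // getNextMarkedWordAddress(addr+2) returns nextOneAddr == addr+9,
             // then size == pointer_delta(nextOneAddr+1, addr) == 10 heap words.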
  6648       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
  6649       // will dirty the card when the klass pointer is installed in the
  6650       // object (signalling the completion of initialization).
  6652   } else {
  6653     // Either a not yet marked object or an uninitialized object
  6654     if (p->klass() == NULL || !p->is_parsable()) {
  6655       // An uninitialized object, skip to the next card, since
  6656       // we may not be able to read its P-bits yet.
  6657       assert(size == 0, "Initial value");
  6658     } else {
  6659       // An object not (yet) reached by marking: we merely need to
  6660       // compute its size so as to go look at the next block.
  6661       assert(p->is_oop(true), "should be an oop");
  6662       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
  6665   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  6666   return size;
  6669 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
  6670   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6671          "CMS thread should hold CMS token");
  6672   assert_lock_strong(_freelistLock);
  6673   assert_lock_strong(_bitMap->lock());
  6674   // relinquish the free_list_lock and bitMapLock()
  6675   _bitMap->lock()->unlock();
  6676   _freelistLock->unlock();
  6677   ConcurrentMarkSweepThread::desynchronize(true);
  6678   ConcurrentMarkSweepThread::acknowledge_yield_request();
  6679   _collector->stopTimer();
  6680   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  6681   if (PrintCMSStatistics != 0) {
  6682     _collector->incrementYields();
  6684   _collector->icms_wait();
  6686   // See the comment in coordinator_yield()
  6687   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6688                    ConcurrentMarkSweepThread::should_yield() &&
  6689                    !CMSCollector::foregroundGCIsActive(); ++i) {
  6690     os::sleep(Thread::current(), 1, false);
  6691     ConcurrentMarkSweepThread::acknowledge_yield_request();
  6694   ConcurrentMarkSweepThread::synchronize(true);
  6695   _freelistLock->lock_without_safepoint_check();
  6696   _bitMap->lock()->lock_without_safepoint_check();
  6697   _collector->startTimer();
  6701 //////////////////////////////////////////////////////////////////
  6702 // SurvivorSpacePrecleanClosure
  6703 //////////////////////////////////////////////////////////////////
  6704 // This (single-threaded) closure is used to preclean the oops in
  6705 // the survivor spaces.
  6706 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
  6708   HeapWord* addr = (HeapWord*)p;
  6709   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  6710   assert(!_span.contains(addr), "we are scanning the survivor spaces");
  6711   assert(p->klass() != NULL, "object should be initialized");
  6712   assert(p->is_parsable(), "must be parsable.");
  6713   // an initialized object; ignore mark word in verification below
  6714   // since we are running concurrent with mutators
  6715   assert(p->is_oop(true), "should be an oop");
  6716   // Note that we do not yield while we iterate over
  6717   // the interior oops of p, pushing the relevant ones
  6718   // on our marking stack.
  6719   size_t size = p->oop_iterate(_scanning_closure);
  6720   do_yield_check();
  6721   // Observe that below, we do not abandon the preclean
  6722   // phase as soon as we should; rather we empty the
  6723   // marking stack before returning. This is to satisfy
  6724   // some existing assertions. In general, it may be a
  6725   // good idea to abort immediately and complete the marking
  6726   // from the grey objects at a later time.
  6727   while (!_mark_stack->isEmpty()) {
  6728     oop new_oop = _mark_stack->pop();
  6729     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
  6730     assert(new_oop->is_parsable(), "Found unparsable oop");
  6731     assert(_bit_map->isMarked((HeapWord*)new_oop),
  6732            "only grey objects on this stack");
  6733     // iterate over the oops in this oop, marking and pushing
  6734     // the ones in CMS heap (i.e. in _span).
  6735     new_oop->oop_iterate(_scanning_closure);
  6736     // check if it's time to yield
  6737     do_yield_check();
  6739   unsigned int after_count =
  6740     GenCollectedHeap::heap()->total_collections();
  6741   bool abort = (_before_count != after_count) ||
  6742                _collector->should_abort_preclean();
  6743   return abort ? 0 : size;
  6746 void SurvivorSpacePrecleanClosure::do_yield_work() {
  6747   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6748          "CMS thread should hold CMS token");
  6749   assert_lock_strong(_bit_map->lock());
  6750   // Relinquish the bit map lock
  6751   _bit_map->lock()->unlock();
  6752   ConcurrentMarkSweepThread::desynchronize(true);
  6753   ConcurrentMarkSweepThread::acknowledge_yield_request();
  6754   _collector->stopTimer();
  6755   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  6756   if (PrintCMSStatistics != 0) {
  6757     _collector->incrementYields();
  6759   _collector->icms_wait();
  6761   // See the comment in coordinator_yield()
  6762   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6763                        ConcurrentMarkSweepThread::should_yield() &&
  6764                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6765     os::sleep(Thread::current(), 1, false);
  6766     ConcurrentMarkSweepThread::acknowledge_yield_request();
  6769   ConcurrentMarkSweepThread::synchronize(true);
  6770   _bit_map->lock()->lock_without_safepoint_check();
  6771   _collector->startTimer();
  6774 // This closure is used to rescan the marked objects on the dirty cards
  6775 // in the mod union table and the card table proper. In the parallel
  6776 // case, although the bitMap is shared, we do a single read so the
  6777 // isMarked() query is "safe".
  6778 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  6779   // Ignore mark word because we are running concurrent with mutators
  6780   assert(p->is_oop_or_null(true), "expected an oop or null");
  6781   HeapWord* addr = (HeapWord*)p;
  6782   assert(_span.contains(addr), "we are scanning the CMS generation");
  6783   bool is_obj_array = false;
  6784   #ifdef DEBUG
  6785     if (!_parallel) {
  6786       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
  6787       assert(_collector->overflow_list_is_empty(),
  6788              "overflow list should be empty");
  6791   #endif // DEBUG
  6792   if (_bit_map->isMarked(addr)) {
  6793     // Obj arrays are precisely marked, non-arrays are not;
  6794     // so we scan objArrays precisely and non-arrays in their
  6795     // entirety.
  6796     if (p->is_objArray()) {
  6797       is_obj_array = true;
  6798       if (_parallel) {
  6799         p->oop_iterate(_par_scan_closure, mr);
  6800       } else {
  6801         p->oop_iterate(_scan_closure, mr);
  6803     } else {
  6804       if (_parallel) {
  6805         p->oop_iterate(_par_scan_closure);
  6806       } else {
  6807         p->oop_iterate(_scan_closure);
  6811   #ifdef DEBUG
  6812     if (!_parallel) {
  6813       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
  6814       assert(_collector->overflow_list_is_empty(),
  6815              "overflow list should be empty");
  6818   #endif // DEBUG
  6819   return is_obj_array;
  6822 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
  6823                         MemRegion span,
  6824                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
  6825                         CMSMarkStack*  revisitStack,
  6826                         bool should_yield, bool verifying):
  6827   _collector(collector),
  6828   _span(span),
  6829   _bitMap(bitMap),
  6830   _mut(&collector->_modUnionTable),
  6831   _markStack(markStack),
  6832   _revisitStack(revisitStack),
  6833   _yield(should_yield),
  6834   _skipBits(0)
  6836   assert(_markStack->isEmpty(), "stack should be empty");
  6837   _finger = _bitMap->startWord();
  6838   _threshold = _finger;
  6839   assert(_collector->_restart_addr == NULL, "Sanity check");
  6840   assert(_span.contains(_finger), "Out of bounds _finger?");
  6841   DEBUG_ONLY(_verifying = verifying;)
  6844 void MarkFromRootsClosure::reset(HeapWord* addr) {
  6845   assert(_markStack->isEmpty(), "would cause duplicates on stack");
  6846   assert(_span.contains(addr), "Out of bounds _finger?");
  6847   _finger = addr;
  6848   _threshold = (HeapWord*)round_to(
  6849                  (intptr_t)_finger, CardTableModRefBS::card_size);
  6852 // Should revisit to see if this should be restructured for
  6853 // greater efficiency.
  6854 void MarkFromRootsClosure::do_bit(size_t offset) {
  6855   if (_skipBits > 0) {
  6856     _skipBits--;
  6857     return;
  6859   // convert offset into a HeapWord*
  6860   HeapWord* addr = _bitMap->startWord() + offset;
  6861   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
  6862          "address out of range");
  6863   assert(_bitMap->isMarked(addr), "tautology");
  6864   if (_bitMap->isMarked(addr+1)) {
  6865     // this is an allocated but not yet initialized object
  6866     assert(_skipBits == 0, "tautology");
  6867     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
  6868     oop p = oop(addr);
  6869     if (p->klass() == NULL || !p->is_parsable()) {
  6870       DEBUG_ONLY(if (!_verifying) {)
  6871         // We re-dirty the cards on which this object lies and increase
  6872         // the _threshold so that we'll come back to scan this object
  6873         // during the preclean or remark phase. (CMSCleanOnEnter)
  6874         if (CMSCleanOnEnter) {
  6875           size_t sz = _collector->block_size_using_printezis_bits(addr);
  6876           HeapWord* start_card_addr = (HeapWord*)round_down(
  6877                                          (intptr_t)addr, CardTableModRefBS::card_size);
  6878           HeapWord* end_card_addr   = (HeapWord*)round_to(
  6879                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
  6880           MemRegion redirty_range = MemRegion(start_card_addr, end_card_addr);
  6881           assert(!redirty_range.is_empty(), "Arithmetical tautology");
  6882           // Bump _threshold to end_card_addr; note that
  6883           // _threshold cannot possibly exceed end_card_addr, anyhow.
  6884           // This prevents future clearing of the card as the scan proceeds
  6885           // to the right.
  6886           assert(_threshold <= end_card_addr,
  6887                  "Because we are just scanning into this object");
  6888           if (_threshold < end_card_addr) {
  6889             _threshold = end_card_addr;
  6891           if (p->klass() != NULL) {
  6892             // Redirty the range of cards...
  6893             _mut->mark_range(redirty_range);
  6894           } // ...else the setting of klass will dirty the card anyway.
  6896       DEBUG_ONLY(})
  6897       return;
  6900   scanOopsInOop(addr);
  6903 // We take a break if we've been at this for a while,
  6904 // so as to avoid monopolizing the locks involved.
  6905 void MarkFromRootsClosure::do_yield_work() {
  6906   // First give up the locks, then yield, then re-lock
  6907   // We should probably use a constructor/destructor idiom to
  6908   // do this unlock/lock or modify the MutexUnlocker class to
  6909   // serve our purpose. XXX
  6910   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  6911          "CMS thread should hold CMS token");
  6912   assert_lock_strong(_bitMap->lock());
  6913   _bitMap->lock()->unlock();
  6914   ConcurrentMarkSweepThread::desynchronize(true);
  6915   ConcurrentMarkSweepThread::acknowledge_yield_request();
  6916   _collector->stopTimer();
  6917   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  6918   if (PrintCMSStatistics != 0) {
  6919     _collector->incrementYields();
  6921   _collector->icms_wait();
  6923   // See the comment in coordinator_yield()
  6924   for (unsigned i = 0; i < CMSYieldSleepCount &&
  6925                        ConcurrentMarkSweepThread::should_yield() &&
  6926                        !CMSCollector::foregroundGCIsActive(); ++i) {
  6927     os::sleep(Thread::current(), 1, false);
  6928     ConcurrentMarkSweepThread::acknowledge_yield_request();
  6931   ConcurrentMarkSweepThread::synchronize(true);
  6932   _bitMap->lock()->lock_without_safepoint_check();
  6933   _collector->startTimer();
  6936 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  6937   assert(_bitMap->isMarked(ptr), "expected bit to be set");
  6938   assert(_markStack->isEmpty(),
  6939          "should drain stack to limit stack usage");
  6940   // convert ptr to an oop preparatory to scanning
  6941   oop obj = oop(ptr);
  6942   // Ignore mark word in verification below, since we
  6943   // may be running concurrent with mutators.
  6944   assert(obj->is_oop(true), "should be an oop");
  6945   assert(_finger <= ptr, "_finger runneth ahead");
  6946   // advance the finger to right end of this object
  6947   _finger = ptr + obj->size();
  6948   assert(_finger > ptr, "we just incremented it above");
  6949   // On large heaps, it may take us some time to get through
  6950   // the marking phase (especially if running iCMS). During
  6951   // this time it's possible that a lot of mutations have
  6952   // accumulated in the card table and the mod union table --
  6953   // these mutation records are redundant until we have
  6954   // actually traced into the corresponding card.
  6955   // Here, we check whether advancing the finger would make
  6956   // us cross into a new card, and if so clear corresponding
  6957   // cards in the MUT (preclean them in the card-table in the
  6958   // future).
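         // A concrete instance (assuming the usual 512-byte card, i.e. 64
         // HeapWords on a 64-bit VM): if _threshold sits at a card boundary C
         // and the new _finger lands 100 words past C, the code below (when
         // CMSCleanOnEnter is enabled) bumps _threshold to C + 128 -- the next
         // card boundary at or beyond _finger -- and clears the two cards
         // covering [C, C + 128) in the mod union table.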
  6960   DEBUG_ONLY(if (!_verifying) {)
  6961     // The clean-on-enter optimization is disabled by default,
  6962     // until we fix 6178663.
  6963     if (CMSCleanOnEnter && (_finger > _threshold)) {
  6964       // [_threshold, _finger) represents the interval
  6965       // of cards to be cleared  in MUT (or precleaned in card table).
  6966       // The set of cards to be cleared is all those that overlap
  6967       // with the interval [_threshold, _finger); note that
  6968       // _threshold is always kept card-aligned but _finger isn't
  6969       // always card-aligned.
  6970       HeapWord* old_threshold = _threshold;
  6971       assert(old_threshold == (HeapWord*)round_to(
  6972               (intptr_t)old_threshold, CardTableModRefBS::card_size),
  6973              "_threshold should always be card-aligned");
  6974       _threshold = (HeapWord*)round_to(
  6975                      (intptr_t)_finger, CardTableModRefBS::card_size);
  6976       MemRegion mr(old_threshold, _threshold);
  6977       assert(!mr.is_empty(), "Control point invariant");
  6978       assert(_span.contains(mr), "Should clear within span");
  6979       // XXX When _finger crosses from old gen into perm gen
  6980       // we may be doing unnecessary cleaning; do better in the
  6981       // future by detecting that condition and clearing fewer
  6982       // MUT/CT entries.
  6983       _mut->clear_range(mr);
  6985   DEBUG_ONLY(})
  6987   // Note: the finger doesn't advance while we drain
  6988   // the stack below.
  6989   PushOrMarkClosure pushOrMarkClosure(_collector,
  6990                                       _span, _bitMap, _markStack,
  6991                                       _revisitStack,
  6992                                       _finger, this);
  6993   bool res = _markStack->push(obj);
  6994   assert(res, "Empty non-zero size stack should have space for single push");
  6995   while (!_markStack->isEmpty()) {
  6996     oop new_oop = _markStack->pop();
  6997     // Skip verifying header mark word below because we are
  6998     // running concurrent with mutators.
  6999     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
  7000     // now scan this oop's oops
  7001     new_oop->oop_iterate(&pushOrMarkClosure);
  7002     do_yield_check();
  7004   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
  7007 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
  7008                        CMSCollector* collector, MemRegion span,
  7009                        CMSBitMap* bit_map,
  7010                        OopTaskQueue* work_queue,
  7011                        CMSMarkStack*  overflow_stack,
  7012                        CMSMarkStack*  revisit_stack,
  7013                        bool should_yield):
  7014   _collector(collector),
  7015   _whole_span(collector->_span),
  7016   _span(span),
  7017   _bit_map(bit_map),
  7018   _mut(&collector->_modUnionTable),
  7019   _work_queue(work_queue),
  7020   _overflow_stack(overflow_stack),
  7021   _revisit_stack(revisit_stack),
  7022   _yield(should_yield),
  7023   _skip_bits(0),
  7024   _task(task)
  7026   assert(_work_queue->size() == 0, "work_queue should be empty");
  7027   _finger = span.start();
  7028   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
  7029   assert(_span.contains(_finger), "Out of bounds _finger?");
  7032 // Should revisit to see if this should be restructured for
  7033 // greater efficiency.
  7034 void Par_MarkFromRootsClosure::do_bit(size_t offset) {
  7035   if (_skip_bits > 0) {
  7036     _skip_bits--;
  7037     return;
  7039   // convert offset into a HeapWord*
  7040   HeapWord* addr = _bit_map->startWord() + offset;
  7041   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
  7042          "address out of range");
  7043   assert(_bit_map->isMarked(addr), "tautology");
  7044   if (_bit_map->isMarked(addr+1)) {
  7045     // this is an allocated object that might not yet be initialized
  7046     assert(_skip_bits == 0, "tautology");
  7047     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
  7048     oop p = oop(addr);
  7049     if (p->klass() == NULL || !p->is_parsable()) {
  7050       // in the case of the Clean-on-Enter optimization, redirty the card
  7051       // and avoid clearing the card by increasing the threshold.
  7052       return;
  7055   scan_oops_in_oop(addr);
  7058 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  7059   assert(_bit_map->isMarked(ptr), "expected bit to be set");
  7060   // Should we assert that our work queue is empty or
  7061   // below some drain limit?
  7062   assert(_work_queue->size() == 0,
  7063          "should drain stack to limit stack usage");
  7064   // convert ptr to an oop preparatory to scanning
  7065   oop obj = oop(ptr);
  7066   // Ignore mark word in verification below, since we
  7067   // may be running concurrent with mutators.
  7068   assert(obj->is_oop(true), "should be an oop");
  7069   assert(_finger <= ptr, "_finger runneth ahead");
  7070   // advance the finger to right end of this object
  7071   _finger = ptr + obj->size();
  7072   assert(_finger > ptr, "we just incremented it above");
  7073   // On large heaps, it may take us some time to get through
  7074   // the marking phase (especially if running iCMS). During
  7075   // this time it's possible that a lot of mutations have
  7076   // accumulated in the card table and the mod union table --
  7077   // these mutation records are redundant until we have
  7078   // actually traced into the corresponding card.
  7079   // Here, we check whether advancing the finger would make
  7080   // us cross into a new card, and if so clear corresponding
  7081   // cards in the MUT (preclean them in the card-table in the
  7082   // future).
  7084   // The clean-on-enter optimization is disabled by default,
  7085   // until we fix 6178663.
  7086   if (CMSCleanOnEnter && (_finger > _threshold)) {
  7087     // [_threshold, _finger) represents the interval
  7088     // of cards to be cleared  in MUT (or precleaned in card table).
  7089     // The set of cards to be cleared is all those that overlap
  7090     // with the interval [_threshold, _finger); note that
  7091     // _threshold is always kept card-aligned but _finger isn't
  7092     // always card-aligned.
  7093     HeapWord* old_threshold = _threshold;
  7094     assert(old_threshold == (HeapWord*)round_to(
  7095             (intptr_t)old_threshold, CardTableModRefBS::card_size),
  7096            "_threshold should always be card-aligned");
  7097     _threshold = (HeapWord*)round_to(
  7098                    (intptr_t)_finger, CardTableModRefBS::card_size);
  7099     MemRegion mr(old_threshold, _threshold);
  7100     assert(!mr.is_empty(), "Control point invariant");
  7101     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
  7102     // XXX When _finger crosses from old gen into perm gen
  7103     // we may be doing unnecessary cleaning; do better in the
  7104     // future by detecting that condition and clearing fewer
  7105     // MUT/CT entries.
  7106     _mut->clear_range(mr);
  7109   // Note: the local finger doesn't advance while we drain
  7110   // the stack below, but the global finger sure can and will.
  7111   HeapWord** gfa = _task->global_finger_addr();
  7112   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
  7113                                       _span, _bit_map,
  7114                                       _work_queue,
  7115                                       _overflow_stack,
  7116                                       _revisit_stack,
  7117                                       _finger,
  7118                                       gfa, this);
  7119   bool res = _work_queue->push(obj);   // overflow could occur here
  7120   assert(res, "Will hold once we use workqueues");
  7121   while (true) {
  7122     oop new_oop;
  7123     if (!_work_queue->pop_local(new_oop)) {
  7124       // We emptied our work_queue; check if there's stuff that can
  7125       // be gotten from the overflow stack.
  7126       if (CMSConcMarkingTask::get_work_from_overflow_stack(
  7127             _overflow_stack, _work_queue)) {
  7128         do_yield_check();
  7129         continue;
  7130       } else {  // done
  7131         break;
  7134     // Skip verifying header mark word below because we are
  7135     // running concurrent with mutators.
  7136     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
  7137     // now scan this oop's oops
  7138     new_oop->oop_iterate(&pushOrMarkClosure);
  7139     do_yield_check();
  7141   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
  7144 // Yield in response to a request from VM Thread or
  7145 // from mutators.
  7146 void Par_MarkFromRootsClosure::do_yield_work() {
  7147   assert(_task != NULL, "sanity");
  7148   _task->yield();
  7151 // A variant of the above used for verifying CMS marking work.
  7152 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
  7153                         MemRegion span,
  7154                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  7155                         CMSMarkStack*  mark_stack):
  7156   _collector(collector),
  7157   _span(span),
  7158   _verification_bm(verification_bm),
  7159   _cms_bm(cms_bm),
  7160   _mark_stack(mark_stack),
  7161   _pam_verify_closure(collector, span, verification_bm, cms_bm,
  7162                       mark_stack)
  7164   assert(_mark_stack->isEmpty(), "stack should be empty");
  7165   _finger = _verification_bm->startWord();
  7166   assert(_collector->_restart_addr == NULL, "Sanity check");
  7167   assert(_span.contains(_finger), "Out of bounds _finger?");
  7170 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  7171   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  7172   assert(_span.contains(addr), "Out of bounds _finger?");
  7173   _finger = addr;
  7176 // Should revisit to see if this should be restructured for
  7177 // greater efficiency.
  7178 void MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  7179   // convert offset into a HeapWord*
  7180   HeapWord* addr = _verification_bm->startWord() + offset;
  7181   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
  7182          "address out of range");
  7183   assert(_verification_bm->isMarked(addr), "tautology");
  7184   assert(_cms_bm->isMarked(addr), "tautology");
  7186   assert(_mark_stack->isEmpty(),
  7187          "should drain stack to limit stack usage");
  7188   // convert addr to an oop preparatory to scanning
  7189   oop obj = oop(addr);
  7190   assert(obj->is_oop(), "should be an oop");
  7191   assert(_finger <= addr, "_finger runneth ahead");
  7192   // advance the finger to right end of this object
  7193   _finger = addr + obj->size();
  7194   assert(_finger > addr, "we just incremented it above");
  7195   // Note: the finger doesn't advance while we drain
  7196   // the stack below.
  7197   bool res = _mark_stack->push(obj);
  7198   assert(res, "Empty non-zero size stack should have space for single push");
  7199   while (!_mark_stack->isEmpty()) {
  7200     oop new_oop = _mark_stack->pop();
  7201     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
  7202     // now scan this oop's oops
  7203     new_oop->oop_iterate(&_pam_verify_closure);
  7205   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
  7208 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
  7209   CMSCollector* collector, MemRegion span,
  7210   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  7211   CMSMarkStack*  mark_stack):
  7212   OopClosure(collector->ref_processor()),
  7213   _collector(collector),
  7214   _span(span),
  7215   _verification_bm(verification_bm),
  7216   _cms_bm(cms_bm),
  7217   _mark_stack(mark_stack)
  7218 { }
  7220 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
  7221 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
  7223 // Upon stack overflow, we discard (part of) the stack,
  7224 // remembering the least address amongst those discarded
  7225 // in CMSCollector's _restart_address.
  7226 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  7227   // Remember the least grey address discarded
  7228   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  7229   _collector->lower_restart_addr(ra);
  7230   _mark_stack->reset();  // discard stack contents
  7231   _mark_stack->expand(); // expand the stack if possible
  7234 void PushAndMarkVerifyClosure::do_oop(oop obj) {
  7235   assert(obj->is_oop_or_null(), "expected an oop or NULL");
  7236   HeapWord* addr = (HeapWord*)obj;
  7237   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
  7238     // Oop lies in _span and isn't yet grey or black
  7239     _verification_bm->mark(addr);            // now grey
  7240     if (!_cms_bm->isMarked(addr)) {
  7241       oop(addr)->print();
  7242       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
  7243                              addr);
  7244       fatal("... aborting");
  7247     if (!_mark_stack->push(obj)) { // stack overflow
  7248       if (PrintCMSStatistics != 0) {
  7249         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  7250                                SIZE_FORMAT, _mark_stack->capacity());
  7252       assert(_mark_stack->isFull(), "Else push should have succeeded");
  7253       handle_stack_overflow(addr);
  7255     // anything including and to the right of _finger
  7256     // will be scanned as we iterate over the remainder of the
  7257     // bit map
  7261 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
  7262                      MemRegion span,
  7263                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
  7264                      CMSMarkStack*  revisitStack,
  7265                      HeapWord* finger, MarkFromRootsClosure* parent) :
  7266   OopClosure(collector->ref_processor()),
  7267   _collector(collector),
  7268   _span(span),
  7269   _bitMap(bitMap),
  7270   _markStack(markStack),
  7271   _revisitStack(revisitStack),
  7272   _finger(finger),
  7273   _parent(parent),
  7274   _should_remember_klasses(collector->should_unload_classes())
  7275 { }
  7277 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
  7278                      MemRegion span,
  7279                      CMSBitMap* bit_map,
  7280                      OopTaskQueue* work_queue,
  7281                      CMSMarkStack*  overflow_stack,
  7282                      CMSMarkStack*  revisit_stack,
  7283                      HeapWord* finger,
  7284                      HeapWord** global_finger_addr,
  7285                      Par_MarkFromRootsClosure* parent) :
  7286   OopClosure(collector->ref_processor()),
  7287   _collector(collector),
  7288   _whole_span(collector->_span),
  7289   _span(span),
  7290   _bit_map(bit_map),
  7291   _work_queue(work_queue),
  7292   _overflow_stack(overflow_stack),
  7293   _revisit_stack(revisit_stack),
  7294   _finger(finger),
  7295   _global_finger_addr(global_finger_addr),
  7296   _parent(parent),
  7297   _should_remember_klasses(collector->should_unload_classes())
  7298 { }
  7300 void CMSCollector::lower_restart_addr(HeapWord* low) {
  7301   assert(_span.contains(low), "Out of bounds addr");
  7302   if (_restart_addr == NULL) {
  7303     _restart_addr = low;
  7304   } else {
  7305     _restart_addr = MIN2(_restart_addr, low);
  7309 // Upon stack overflow, we discard (part of) the stack,
  7310 // remembering the least address amongst those discarded
  7311 // in CMSCollector's _restart_address.
  7312 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  7313   // Remember the least grey address discarded
  7314   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
  7315   _collector->lower_restart_addr(ra);
  7316   _markStack->reset();  // discard stack contents
  7317   _markStack->expand(); // expand the stack if possible
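       // For example: if the oop that failed to push and the entries thrown
       // away by reset() above span addresses A1 < A2 < ... < An, then A1 is
       // recorded via lower_restart_addr() (unless an even lower address was
       // recorded earlier), so that marking can later be restarted from
       // _restart_addr (cf. MarkFromRootsClosure::reset() above) and the
       // discarded grey objects re-discovered from the bit map.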
  7320 // Upon stack overflow, we discard (part of) the stack,
  7321 // remembering the least address amongst those discarded
  7322 // in CMSCollector's _restart_address.
  7323 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  7324   // We need to do this under a mutex to prevent other
  7325   // workers from interfering with the expansion below.
  7326   MutexLockerEx ml(_overflow_stack->par_lock(),
  7327                    Mutex::_no_safepoint_check_flag);
  7328   // Remember the least grey address discarded
  7329   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  7330   _collector->lower_restart_addr(ra);
  7331   _overflow_stack->reset();  // discard stack contents
  7332   _overflow_stack->expand(); // expand the stack if possible
  7335 void PushOrMarkClosure::do_oop(oop obj) {
  7336   // Ignore mark word because we are running concurrent with mutators.
  7337   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  7338   HeapWord* addr = (HeapWord*)obj;
  7339   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
  7340     // Oop lies in _span and isn't yet grey or black
  7341     _bitMap->mark(addr);            // now grey
  7342     if (addr < _finger) {
  7343       // the bit map iteration has already either passed, or
  7344       // sampled, this bit in the bit map; we'll need to
  7345       // use the marking stack to scan this oop's oops.
  7346       bool simulate_overflow = false;
  7347       NOT_PRODUCT(
  7348         if (CMSMarkStackOverflowALot &&
  7349             _collector->simulate_overflow()) {
  7350           // simulate a stack overflow
  7351           simulate_overflow = true;
  7354       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
  7355         if (PrintCMSStatistics != 0) {
  7356           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  7357                                  SIZE_FORMAT, _markStack->capacity());
  7359         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
  7360         handle_stack_overflow(addr);
  7363     // anything including and to the right of _finger
  7364     // will be scanned as we iterate over the remainder of the
  7365     // bit map
  7366     do_yield_check();
  7370 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  7371 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  7373 void Par_PushOrMarkClosure::do_oop(oop obj) {
  7374   // Ignore mark word because we are running concurrent with mutators.
  7375   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
  7376   HeapWord* addr = (HeapWord*)obj;
  7377   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
  7378     // Oop lies in _whole_span and isn't yet grey or black
  7379     // We read the global_finger (volatile read) strictly after marking the oop
  7380     bool res = _bit_map->par_mark(addr);    // now grey
  7381     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
  7382     // Should we push this marked oop on our stack?
  7383     // -- if someone else marked it, nothing to do
  7384     // -- if target oop is above global finger nothing to do
  7385     // -- if target oop is in chunk and above local finger
  7386     //      then nothing to do
  7387     // -- else push on work queue
  7388     if (   !res       // someone else marked it, they will deal with it
  7389         || (addr >= *gfa)  // will be scanned in a later task
  7390         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
  7391       return;
  7393     // the bit map iteration has already either passed, or
  7394     // sampled, this bit in the bit map; we'll need to
  7395     // use the marking stack to scan this oop's oops.
  7396     bool simulate_overflow = false;
  7397     NOT_PRODUCT(
  7398       if (CMSMarkStackOverflowALot &&
  7399           _collector->simulate_overflow()) {
  7400         // simulate a stack overflow
  7401         simulate_overflow = true;
  7404     if (simulate_overflow ||
  7405         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
  7406       // stack overflow
  7407       if (PrintCMSStatistics != 0) {
  7408         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
  7409                                SIZE_FORMAT, _overflow_stack->capacity());
  7411       // We cannot assert that the overflow stack is full because
  7412       // it may have been emptied since.
  7413       assert(simulate_overflow ||
  7414              _work_queue->size() == _work_queue->max_elems(),
  7415             "Else push should have succeeded");
  7416       handle_stack_overflow(addr);
  7418     do_yield_check();
  7422 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  7423 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  7425 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
  7426                                        MemRegion span,
  7427                                        ReferenceProcessor* rp,
  7428                                        CMSBitMap* bit_map,
  7429                                        CMSBitMap* mod_union_table,
  7430                                        CMSMarkStack*  mark_stack,
  7431                                        CMSMarkStack*  revisit_stack,
  7432                                        bool           concurrent_precleaning):
  7433   OopClosure(rp),
  7434   _collector(collector),
  7435   _span(span),
  7436   _bit_map(bit_map),
  7437   _mod_union_table(mod_union_table),
  7438   _mark_stack(mark_stack),
  7439   _revisit_stack(revisit_stack),
  7440   _concurrent_precleaning(concurrent_precleaning),
  7441   _should_remember_klasses(collector->should_unload_classes())
  7443   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  7446 // Grey object rescan during pre-cleaning and second checkpoint phases --
  7447 // the non-parallel version (the parallel version appears further below.)
  7448 void PushAndMarkClosure::do_oop(oop obj) {
  7449   // If _concurrent_precleaning, ignore mark word verification
  7450   assert(obj->is_oop_or_null(_concurrent_precleaning),
  7451          "expected an oop or NULL");
  7452   HeapWord* addr = (HeapWord*)obj;
  7453   // Check if oop points into the CMS generation
  7454   // and is not marked
  7455   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
  7456     // a white object ...
  7457     _bit_map->mark(addr);         // ... now grey
  7458     // push on the marking stack (grey set)
  7459     bool simulate_overflow = false;
  7460     NOT_PRODUCT(
  7461       if (CMSMarkStackOverflowALot &&
  7462           _collector->simulate_overflow()) {
  7463         // simulate a stack overflow
  7464         simulate_overflow = true;
  7467     if (simulate_overflow || !_mark_stack->push(obj)) {
  7468       if (_concurrent_precleaning) {
  7469          // During precleaning we can just dirty the appropriate card
  7470          // in the mod union table, thus ensuring that the object remains
  7471          // in the grey set, and continue. Note that no one can be interfering
  7472          // with us in this action of dirtying the mod union table, so
  7473          // no locking is required.
  7474          _mod_union_table->mark(addr);
  7475          _collector->_ser_pmc_preclean_ovflw++;
  7476       } else {
  7477          // During the remark phase, we need to remember this oop
  7478          // in the overflow list.
  7479          _collector->push_on_overflow_list(obj);
  7480          _collector->_ser_pmc_remark_ovflw++;
  7486 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
  7487                                                MemRegion span,
  7488                                                ReferenceProcessor* rp,
  7489                                                CMSBitMap* bit_map,
  7490                                                OopTaskQueue* work_queue,
  7491                                                CMSMarkStack* revisit_stack):
  7492   OopClosure(rp),
  7493   _collector(collector),
  7494   _span(span),
  7495   _bit_map(bit_map),
  7496   _work_queue(work_queue),
  7497   _revisit_stack(revisit_stack),
  7498   _should_remember_klasses(collector->should_unload_classes())
  7500   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
  7503 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  7504 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  7506 // Grey object rescan during second checkpoint phase --
  7507 // the parallel version.
  7508 void Par_PushAndMarkClosure::do_oop(oop obj) {
  7509   // In the assert below, we ignore the mark word because
  7510   // this oop may point to an already visited object that is
  7511   // on the overflow stack (in which case the mark word has
  7512   // been hijacked for chaining into the overflow stack --
  7513   // if this is the last object in the overflow stack then
  7514   // its mark word will be NULL). Because this object may
  7515   // have been subsequently popped off the global overflow
  7516   // stack, and the mark word possibly restored to the prototypical
  7517   // value, by the time we get to examine this failing assert in
  7518   // the debugger, is_oop_or_null(false) may subsequently start
  7519   // to hold.
  7520   assert(obj->is_oop_or_null(true),
  7521          "expected an oop or NULL");
  7522   HeapWord* addr = (HeapWord*)obj;
  7523   // Check if oop points into the CMS generation
  7524   // and is not marked
  7525   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
  7526     // a white object ...
  7527     // If we manage to "claim" the object, by being the
  7528     // first thread to mark it, then we push it on our
  7529     // marking stack
  7530     if (_bit_map->par_mark(addr)) {     // ... now grey
  7531       // push on work queue (grey set)
  7532       bool simulate_overflow = false;
  7533       NOT_PRODUCT(
  7534         if (CMSMarkStackOverflowALot &&
  7535             _collector->par_simulate_overflow()) {
  7536           // simulate a stack overflow
  7537           simulate_overflow = true;
  7540       if (simulate_overflow || !_work_queue->push(obj)) {
  7541         _collector->par_push_on_overflow_list(obj);
  7542         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
  7544     } // Else, some other thread got there first
  7548 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  7549 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  7551 void PushAndMarkClosure::remember_klass(Klass* k) {
  7552   if (!_revisit_stack->push(oop(k))) {
  7553     fatal("Revisit stack overflowed in PushAndMarkClosure");
  7557 void Par_PushAndMarkClosure::remember_klass(Klass* k) {
  7558   if (!_revisit_stack->par_push(oop(k))) {
  7559     fatal("Revisit stack overflowed in Par_PushAndMarkClosure");
  7563 void CMSPrecleanRefsYieldClosure::do_yield_work() {
  7564   Mutex* bml = _collector->bitMapLock();
  7565   assert_lock_strong(bml);
  7566   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  7567          "CMS thread should hold CMS token");
  7569   bml->unlock();
  7570   ConcurrentMarkSweepThread::desynchronize(true);
  7572   ConcurrentMarkSweepThread::acknowledge_yield_request();
  7574   _collector->stopTimer();
  7575   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  7576   if (PrintCMSStatistics != 0) {
  7577     _collector->incrementYields();
  7579   _collector->icms_wait();
  7581   // See the comment in coordinator_yield()
  7582   for (unsigned i = 0; i < CMSYieldSleepCount &&
  7583                        ConcurrentMarkSweepThread::should_yield() &&
  7584                        !CMSCollector::foregroundGCIsActive(); ++i) {
  7585     os::sleep(Thread::current(), 1, false);
  7586     ConcurrentMarkSweepThread::acknowledge_yield_request();
  7589   ConcurrentMarkSweepThread::synchronize(true);
  7590   bml->lock();
  7592   _collector->startTimer();
  7595 bool CMSPrecleanRefsYieldClosure::should_return() {
  7596   if (ConcurrentMarkSweepThread::should_yield()) {
  7597     do_yield_work();
  7599   return _collector->foregroundGCIsActive();
  7602 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
  7603   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
  7604          "mr should be aligned to start at a card boundary");
  7605   // We'd like to assert:
  7606   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
  7607   //        "mr should be a range of cards");
  7608   // However, that would be too strong in one case -- the last
  7609   // partition ends at _unallocated_block which, in general, can be
  7610   // an arbitrary boundary, not necessarily card aligned.
  7611   if (PrintCMSStatistics != 0) {
  7612     _num_dirty_cards +=
  7613          mr.word_size()/CardTableModRefBS::card_size_in_words;
  7615   _space->object_iterate_mem(mr, &_scan_cl);
  7618 SweepClosure::SweepClosure(CMSCollector* collector,
  7619                            ConcurrentMarkSweepGeneration* g,
  7620                            CMSBitMap* bitMap, bool should_yield) :
  7621   _collector(collector),
  7622   _g(g),
  7623   _sp(g->cmsSpace()),
  7624   _limit(_sp->sweep_limit()),
  7625   _freelistLock(_sp->freelistLock()),
  7626   _bitMap(bitMap),
  7627   _yield(should_yield),
  7628   _inFreeRange(false),           // No free range at beginning of sweep
  7629   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
  7630   _lastFreeRangeCoalesced(false),
  7631   _freeFinger(g->used_region().start())
  7633   NOT_PRODUCT(
  7634     _numObjectsFreed = 0;
  7635     _numWordsFreed   = 0;
  7636     _numObjectsLive = 0;
  7637     _numWordsLive = 0;
  7638     _numObjectsAlreadyFree = 0;
  7639     _numWordsAlreadyFree = 0;
  7640     _last_fc = NULL;
  7642     _sp->initializeIndexedFreeListArrayReturnedBytes();
  7643     _sp->dictionary()->initializeDictReturnedBytes();
  7645   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
  7646          "sweep _limit out of bounds");
  7647   if (CMSTraceSweeper) {
  7648     gclog_or_tty->print("\n====================\nStarting new sweep\n");
  7652 // We need this destructor to reclaim any space at the end
  7653 // of the space, which do_blk below may not have added back to
  7654 // the free lists. [basically dealing with the "fringe effect"]
  7655 SweepClosure::~SweepClosure() {
  7656   assert_lock_strong(_freelistLock);
  7657   // this should be treated as the end of a free run if any
  7658   // The current free range should be returned to the free lists
  7659   // as one coalesced chunk.
  7660   if (inFreeRange()) {
  7661     flushCurFreeChunk(freeFinger(),
  7662       pointer_delta(_limit, freeFinger()));
  7663     assert(freeFinger() < _limit, "the finger pointeth off base");
  7664     if (CMSTraceSweeper) {
  7665       gclog_or_tty->print("destructor:");
  7666       gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") "
  7667                  "[coalesced:"SIZE_FORMAT"]\n",
  7668                  freeFinger(), pointer_delta(_limit, freeFinger()),
  7669                  lastFreeRangeCoalesced());
  7672   NOT_PRODUCT(
  7673     if (Verbose && PrintGC) {
  7674       gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
  7675                           SIZE_FORMAT " bytes",
  7676                  _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
  7677       gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
  7678                              SIZE_FORMAT" bytes  "
  7679         "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
  7680         _numObjectsLive, _numWordsLive*sizeof(HeapWord),
  7681         _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
  7682       size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
  7683         sizeof(HeapWord);
  7684       gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
  7686       if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
  7687         size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
  7688         size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
  7689         size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
  7690         gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
  7691         gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
  7692           indexListReturnedBytes);
  7693         gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
  7694           dictReturnedBytes);
  7698   // Now, in debug mode, just null out the sweep_limit
  7699   NOT_PRODUCT(_sp->clear_sweep_limit();)
  7700   if (CMSTraceSweeper) {
  7701     gclog_or_tty->print("end of sweep\n================\n");
  7705 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
  7706     bool freeRangeInFreeLists) {
  7707   if (CMSTraceSweeper) {
  7708     gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n",
  7709                freeFinger, _sp->block_size(freeFinger),
  7710                freeRangeInFreeLists);
  7712   assert(!inFreeRange(), "Trampling existing free range");
  7713   set_inFreeRange(true);
  7714   set_lastFreeRangeCoalesced(false);
  7716   set_freeFinger(freeFinger);
  7717   set_freeRangeInFreeLists(freeRangeInFreeLists);
  7718   if (CMSTestInFreeList) {
  7719     if (freeRangeInFreeLists) {
  7720       FreeChunk* fc = (FreeChunk*) freeFinger;
  7721       assert(fc->isFree(), "A chunk on the free list should be free.");
  7722       assert(fc->size() > 0, "Free range should have a size");
  7723       assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
  7728 // Note that the sweeper runs concurrently with mutators. Thus,
  7729 // it is possible for direct allocation in this generation to happen
  7730 // in the middle of the sweep. Note that the sweeper also coalesces
  7731 // contiguous free blocks. Thus, unless the sweeper and the allocator
  7732 // synchronize appropriately, freshly allocated blocks may get swept up.
  7733 // This synchronization is accomplished by the sweeper locking the free lists while
  7734 // it is sweeping. Thus blocks that are determined to be free are
  7735 // indeed free. There is however one additional complication:
  7736 // blocks that have been allocated since the final checkpoint and
  7737 // mark, will not have been marked and so would be treated as
  7738 // unreachable and swept up. To prevent this, the allocator marks
  7739 // the bit map when allocating during the sweep phase. This leads,
  7740 // however, to a further complication -- objects may have been allocated
  7741 // but not yet initialized -- in the sense that the header isn't yet
  7742 // installed. The sweeper cannot then determine the size of the block
  7743 // in order to skip over it. To deal with this case, we use a technique
  7744 // (due to Printezis) to encode such uninitialized block sizes in the
  7745 // bit map. Since the bit map uses one bit per HeapWord, while the
  7746 // CMS generation has a minimum object size of 3 HeapWords, it follows
  7747 // that "normal marks" won't be adjacent in the bit map (there will
  7748 // always be at least two 0 bits between successive 1 bits). We make use
  7749 // of these "unused" bits to represent uninitialized blocks -- the bit
  7750 // corresponding to the start of the uninitialized object and the next
  7751 // bit are both set. Finally, a 1 bit marks the end of the object that
  7752 // started with the two consecutive 1 bits to indicate its potentially
  7753 // uninitialized state.
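//
// Illustrative sketch (not part of the original source): the helper below,
// with a hypothetical name, models the bit map as a plain boolean array
// (one entry per HeapWord) and decodes a block size from the Printezis
// marks described above -- two consecutive 1 bits at the block's first two
// words and a single 1 bit at its last word.  It only mirrors, in
// self-contained form, the computation performed in doLiveChunk() further
// down; it is not used by the collector.
static size_t example_printezis_block_size(const bool* bits,
                                           size_t start, size_t limit) {
  // Caller guarantees bits[start] and bits[start + 1] are both set,
  // i.e. this is a Printezis-marked, possibly uninitialized block.
  size_t end = start + 2;              // normal marks are never this close
  while (end < limit && !bits[end]) {
    end++;                             // scan for the bit at the last word
  }
  return end - start + 1;              // block size in HeapWords
}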
  7755 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
  7756   FreeChunk* fc = (FreeChunk*)addr;
  7757   size_t res;
  7759   // check if we are done sweeping
  7760   if (addr == _limit) { // we have swept up to the limit, do nothing more
  7761     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
  7762            "sweep _limit out of bounds");
  7763     // help the closure application finish
  7764     return pointer_delta(_sp->end(), _limit);
  7766   assert(addr <= _limit, "sweep invariant");
  7768   // check if we should yield
  7769   do_yield_check(addr);
  7770   if (fc->isFree()) {
  7771     // Chunk that is already free
  7772     res = fc->size();
  7773     doAlreadyFreeChunk(fc);
  7774     debug_only(_sp->verifyFreeLists());
  7775     assert(res == fc->size(), "Don't expect the size to change");
  7776     NOT_PRODUCT(
  7777       _numObjectsAlreadyFree++;
  7778       _numWordsAlreadyFree += res;
  7780     NOT_PRODUCT(_last_fc = fc;)
  7781   } else if (!_bitMap->isMarked(addr)) {
  7782     // Chunk is fresh garbage
  7783     res = doGarbageChunk(fc);
  7784     debug_only(_sp->verifyFreeLists());
  7785     NOT_PRODUCT(
  7786       _numObjectsFreed++;
  7787       _numWordsFreed += res;
  7789   } else {
  7790     // Chunk that is alive.
  7791     res = doLiveChunk(fc);
  7792     debug_only(_sp->verifyFreeLists());
  7793     NOT_PRODUCT(
  7794         _numObjectsLive++;
  7795         _numWordsLive += res;
  7798   return res;
  7801 // For the smart allocation, record the following
  7802 //  split deaths - a free chunk is removed from its free list because
  7803 //      it is being split into two or more chunks.
  7804 //  split birth - a free chunk is being added to its free list because
  7805 //      a larger free chunk has been split and resulted in this free chunk.
  7806 //  coal death - a free chunk is being removed from its free list because
  7807 //      it is being coalesced into a large free chunk.
  7808 //  coal birth - a free chunk is being added to its free list because
  7809 //      it was created when two or more free chunks were coalesced into
  7810 //      this free chunk.
  7811 //
  7812 // These statistics are used to determine the desired number of free
  7813 // chunks of a given size.  The desired number is chosen to be relative
  7814 // to the end of a CMS sweep.  The desired number at the end of a sweep
  7815 // is the
  7816 //      count-at-end-of-previous-sweep (an amount that was enough)
  7817 //              - count-at-beginning-of-current-sweep  (the excess)
  7818 //              + split-births  (gains in this size during interval)
  7819 //              - split-deaths  (demands on this size during interval)
  7820 // where the interval is from the end of one sweep to the end of the
  7821 // next.
  7822 //
  7823 // While sweeping, the sweeper maintains an accumulated chunk made up of
  7824 // the chunks that have been coalesced so far.  That
  7825 // will be termed the left-hand chunk.  A new chunk of garbage that
  7826 // is being considered for coalescing will be referred to as the
  7827 // right-hand chunk.
  7828 //
  7829 // When making a decision on whether to coalesce a right-hand chunk with
  7830 // the current left-hand chunk, the current count vs. the desired count
  7831 // of the left-hand chunk is considered.  Also if the right-hand chunk
  7832 // is near the large chunk at the end of the heap (see
  7833 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
  7834 // left-hand chunk is coalesced.
  7835 //
  7836 // When making a decision about whether to split a chunk, the desired count
  7837 // vs. the current count of the candidate to be split is also considered.
  7838 // If the candidate is underpopulated (currently fewer chunks than desired)
  7839 // a chunk of an overpopulated (currently more chunks than desired) size may
  7840 // be chosen.  The "hint" associated with a free list, if non-null, points
  7841 // to a free list which may be overpopulated.
  7842 //
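//
// Illustrative sketch (not part of the original source): the desired-count
// arithmetic described above, written out as a hypothetical, self-contained
// helper.  For example, a size with 40 chunks at the end of the previous
// sweep, 10 left at the start of this sweep, 8 split births and 5 split
// deaths in the interval would have a desired count of 40 - 10 + 8 - 5 = 33.
static ssize_t example_desired_chunk_count(ssize_t count_end_prev_sweep,
                                           ssize_t count_begin_this_sweep,
                                           ssize_t split_births,
                                           ssize_t split_deaths) {
  ssize_t desired = count_end_prev_sweep    // an amount that was enough
                  - count_begin_this_sweep  // the excess
                  + split_births            // gains during the interval
                  - split_deaths;           // demands during the interval
  return desired > 0 ? desired : 0;         // this sketch clamps at zero
}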
  7844 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
  7845   size_t size = fc->size();
  7846   // Chunks that cannot be coalesced are not in the
  7847   // free lists.
  7848   if (CMSTestInFreeList && !fc->cantCoalesce()) {
  7849     assert(_sp->verifyChunkInFreeLists(fc),
  7850       "free chunk should be in free lists");
  7852   // A chunk that is already free should not have been
  7853   // marked in the bit map.
  7854   HeapWord* addr = (HeapWord*) fc;
  7855   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  7856   // Verify that the bit map has no bits marked between
  7857   // addr and purported end of this block.
  7858   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  7860   // Some chunks cannot be coalesced under any circumstances.
  7861   // See the definition of cantCoalesce().
  7862   if (!fc->cantCoalesce()) {
  7863     // This chunk can potentially be coalesced.
  7864     if (_sp->adaptive_freelists()) {
  7865       // All the work is done in doPostIsFreeOrGarbageChunk().
  7866       doPostIsFreeOrGarbageChunk(fc, size);
  7867     } else {  // Not adaptive free lists
  7868       // this is a free chunk that can potentially be coalesced by the sweeper;
  7869       if (!inFreeRange()) {
  7870         // if the next chunk is a free block that can't be coalesced
  7871         // it doesn't make sense to remove this chunk from the free lists
  7872         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
  7873         assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
  7874         if ((HeapWord*)nextChunk < _limit  &&    // there's a next chunk...
  7875             nextChunk->isFree()    &&            // which is free...
  7876             nextChunk->cantCoalesce()) {         // ... but can't be coalesced
  7877           // nothing to do
  7878         } else {
  7879           // Potentially the start of a new free range:
  7880           // Don't eagerly remove it from the free lists.
  7881           // No need to remove it if it will just be put
  7882           // back again.  (Also from a pragmatic point of view
  7883           // if it is a free block in a region that is beyond
  7884           // any allocated blocks, an assertion will fail)
  7885           // Remember the start of a free run.
  7886           initialize_free_range(addr, true);
  7887           // end - can coalesce with next chunk
  7889       } else {
  7890         // the midst of a free range, we are coalescing
  7891         debug_only(record_free_block_coalesced(fc);)
  7892         if (CMSTraceSweeper) {
  7893           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
  7895         // remove it from the free lists
  7896         _sp->removeFreeChunkFromFreeLists(fc);
  7897         set_lastFreeRangeCoalesced(true);
  7898         // If the chunk is being coalesced and the current free range is
  7899         // in the free lists, remove the current free range so that it
  7900         // will be returned to the free lists in its entirety - all
  7901         // the coalesced pieces included.
  7902         if (freeRangeInFreeLists()) {
  7903           FreeChunk* ffc = (FreeChunk*) freeFinger();
  7904           assert(ffc->size() == pointer_delta(addr, freeFinger()),
  7905             "Size of free range is inconsistent with chunk size.");
  7906           if (CMSTestInFreeList) {
  7907             assert(_sp->verifyChunkInFreeLists(ffc),
  7908               "free range is not in free lists");
  7910           _sp->removeFreeChunkFromFreeLists(ffc);
  7911           set_freeRangeInFreeLists(false);
  7915   } else {
  7916     // Code path common to both original and adaptive free lists.
  7918     // can't coalesce with previous block; this should be treated
  7919     // as the end of a free run if any
  7920     if (inFreeRange()) {
  7921       // we kicked some butt; time to pick up the garbage
  7922       assert(freeFinger() < addr, "the finger pointeth off base");
  7923       flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
  7925     // else, nothing to do, just continue
  7929 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
  7930   // This is a chunk of garbage.  It is not in any free list.
  7931   // Add it to a free list or let it possibly be coalesced into
  7932   // a larger chunk.
  7933   HeapWord* addr = (HeapWord*) fc;
  7934   size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
  7936   if (_sp->adaptive_freelists()) {
  7937     // Verify that the bit map has no bits marked between
  7938     // addr and purported end of just dead object.
  7939     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  7941     doPostIsFreeOrGarbageChunk(fc, size);
  7942   } else {
  7943     if (!inFreeRange()) {
  7944       // start of a new free range
  7945       assert(size > 0, "A free range should have a size");
  7946       initialize_free_range(addr, false);
  7948     } else {
  7949       // this will be swept up when we hit the end of the
  7950       // free range
  7951       if (CMSTraceSweeper) {
  7952         gclog_or_tty->print("  -- pick up garbage 0x%x (%d) \n", fc, size);
  7954       // If the chunk is being coalesced and the current free range is
  7955       // in the free lists, remove the current free range so that it
  7956       // will be returned to the free lists in its entirety - all
  7957       // the coalesced pieces included.
  7958       if (freeRangeInFreeLists()) {
  7959         FreeChunk* ffc = (FreeChunk*)freeFinger();
  7960         assert(ffc->size() == pointer_delta(addr, freeFinger()),
  7961           "Size of free range is inconsistent with chunk size.");
  7962         if (CMSTestInFreeList) {
  7963           assert(_sp->verifyChunkInFreeLists(ffc),
  7964             "free range is not in free lists");
  7966         _sp->removeFreeChunkFromFreeLists(ffc);
  7967         set_freeRangeInFreeLists(false);
  7969       set_lastFreeRangeCoalesced(true);
  7971     // this will be swept up when we hit the end of the free range
  7973     // Verify that the bit map has no bits marked between
  7974     // addr and purported end of just dead object.
  7975     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  7977   return size;
  7980 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
  7981   HeapWord* addr = (HeapWord*) fc;
  7982   // The sweeper has just found a live object. Return any accumulated
  7983   // left hand chunk to the free lists.
  7984   if (inFreeRange()) {
  7985     if (_sp->adaptive_freelists()) {
  7986       flushCurFreeChunk(freeFinger(),
  7987                         pointer_delta(addr, freeFinger()));
  7988     } else { // not adaptive freelists
  7989       set_inFreeRange(false);
  7990       // Add the free range back to the free list if it is not already
  7991       // there.
  7992       if (!freeRangeInFreeLists()) {
  7993         assert(freeFinger() < addr, "the finger pointeth off base");
  7994         if (CMSTraceSweeper) {
  7995           gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
  7996             "[coalesced:%d]\n",
  7997             freeFinger(), pointer_delta(addr, freeFinger()),
  7998             lastFreeRangeCoalesced());
  8000         _sp->addChunkAndRepairOffsetTable(freeFinger(),
  8001           pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
  8006   // Common code path for original and adaptive free lists.
  8008   // this object is live: we'd normally expect this to be
  8009   // an oop, and like to assert the following:
  8010   // assert(oop(addr)->is_oop(), "live block should be an oop");
  8011   // However, as we commented above, this may be an object whose
  8012   // header hasn't yet been initialized.
  8013   size_t size;
  8014   assert(_bitMap->isMarked(addr), "Tautology for this control point");
  8015   if (_bitMap->isMarked(addr + 1)) {
  8016     // Determine the size from the bit map, rather than trying to
  8017     // compute it from the object header.
  8018     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
  8019     size = pointer_delta(nextOneAddr + 1, addr);
  8020     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  8021            "alignment problem");
  8023     #ifdef DEBUG
  8024       if (oop(addr)->klass() != NULL &&
  8025           (   !_collector->should_unload_classes()
  8026            || oop(addr)->is_parsable())) {
  8027         // Ignore mark word because we are running concurrent with mutators
  8028         assert(oop(addr)->is_oop(true), "live block should be an oop");
  8029         assert(size ==
  8030                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
  8031                "P-mark and computed size do not agree");
  8033     #endif
  8035   } else {
  8036     // This should be an initialized object that's alive.
  8037     assert(oop(addr)->klass() != NULL &&
  8038            (!_collector->should_unload_classes()
  8039             || oop(addr)->is_parsable()),
  8040            "Should be an initialized object");
  8041     // Ignore mark word because we are running concurrent with mutators
  8042     assert(oop(addr)->is_oop(true), "live block should be an oop");
  8043     // Verify that the bit map has no bits marked between
  8044     // addr and purported end of this block.
  8045     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
  8046     assert(size >= 3, "Necessary for Printezis marks to work");
  8047     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
  8048     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
  8050   return size;
  8053 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
  8054                                             size_t chunkSize) {
  8055   // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
  8056   // scheme.
  8057   bool fcInFreeLists = fc->isFree();
  8058   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
  8059   assert((HeapWord*)fc <= _limit, "sweep invariant");
  8060   if (CMSTestInFreeList && fcInFreeLists) {
  8061     assert(_sp->verifyChunkInFreeLists(fc),
  8062       "free chunk is not in free lists");
  8066   if (CMSTraceSweeper) {
  8067     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
  8070   HeapWord* addr = (HeapWord*) fc;
  8072   bool coalesce;
  8073   size_t left  = pointer_delta(addr, freeFinger());
  8074   size_t right = chunkSize;
  8075   switch (FLSCoalescePolicy) {
  8076     // numeric value forms a coalescing aggressiveness metric
  8077     case 0:  { // never coalesce
  8078       coalesce = false;
  8079       break;
  8081     case 1: { // coalesce if left & right chunks on overpopulated lists
  8082       coalesce = _sp->coalOverPopulated(left) &&
  8083                  _sp->coalOverPopulated(right);
  8084       break;
  8086     case 2: { // coalesce if left chunk on overpopulated list (default)
  8087       coalesce = _sp->coalOverPopulated(left);
  8088       break;
  8090     case 3: { // coalesce if left OR right chunk on overpopulated list
  8091       coalesce = _sp->coalOverPopulated(left) ||
  8092                  _sp->coalOverPopulated(right);
  8093       break;
  8095     case 4: { // always coalesce
  8096       coalesce = true;
  8097       break;
  8099     default:
  8100      ShouldNotReachHere();
  8103   // Should the current free range be coalesced?
  8104   // If the chunk is in a free range and either we decided to coalesce above
  8105   // or the chunk is near the large block at the end of the heap
  8106   // (isNearLargestChunk() returns true), then coalesce this chunk.
  8107   bool doCoalesce = inFreeRange() &&
  8108     (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
  8109   if (doCoalesce) {
  8110     // Coalesce the current free range on the left with the new
  8111     // chunk on the right.  If either is on a free list,
  8112     // it must be removed from the list and stashed in the closure.
  8113     if (freeRangeInFreeLists()) {
  8114       FreeChunk* ffc = (FreeChunk*)freeFinger();
  8115       assert(ffc->size() == pointer_delta(addr, freeFinger()),
  8116         "Size of free range is inconsistent with chunk size.");
  8117       if (CMSTestInFreeList) {
  8118         assert(_sp->verifyChunkInFreeLists(ffc),
  8119           "Chunk is not in free lists");
  8121       _sp->coalDeath(ffc->size());
  8122       _sp->removeFreeChunkFromFreeLists(ffc);
  8123       set_freeRangeInFreeLists(false);
  8125     if (fcInFreeLists) {
  8126       _sp->coalDeath(chunkSize);
  8127       assert(fc->size() == chunkSize,
  8128         "The chunk has the wrong size or is not in the free lists");
  8129       _sp->removeFreeChunkFromFreeLists(fc);
  8131     set_lastFreeRangeCoalesced(true);
  8132   } else {  // not in a free range and/or should not coalesce
  8133     // Return the current free range and start a new one.
  8134     if (inFreeRange()) {
  8135       // In a free range but cannot coalesce with the right hand chunk.
  8136       // Put the current free range into the free lists.
  8137       flushCurFreeChunk(freeFinger(),
  8138         pointer_delta(addr, freeFinger()));
  8140     // Set up for new free range.  Pass along whether the right hand
  8141     // chunk is in the free lists.
  8142     initialize_free_range((HeapWord*)fc, fcInFreeLists);
  8145 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) {
  8146   assert(inFreeRange(), "Should only be called if currently in a free range.");
  8147   assert(size > 0,
  8148     "A zero sized chunk cannot be added to the free lists.");
  8149   if (!freeRangeInFreeLists()) {
  8150     if (CMSTestInFreeList) {
  8151       FreeChunk* fc = (FreeChunk*) chunk;
  8152       fc->setSize(size);
  8153       assert(!_sp->verifyChunkInFreeLists(fc),
  8154         "chunk should not be in free lists yet");
  8156     if (CMSTraceSweeper) {
  8157       gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
  8158                     chunk, size);
  8160     // A new free range is going to be starting.  The current
  8161     // free range has not been added to the free lists yet or
  8162     // was removed so add it back.
  8163     // If the current free range was coalesced, then the death
  8164     // of the free range was recorded.  Record a birth now.
  8165     if (lastFreeRangeCoalesced()) {
  8166       _sp->coalBirth(size);
  8168     _sp->addChunkAndRepairOffsetTable(chunk, size,
  8169             lastFreeRangeCoalesced());
  8171   set_inFreeRange(false);
  8172   set_freeRangeInFreeLists(false);
  8175 // We take a break if we've been at this for a while,
  8176 // so as to avoid monopolizing the locks involved.
  8177 void SweepClosure::do_yield_work(HeapWord* addr) {
  8178   // Return current free chunk being used for coalescing (if any)
  8179   // to the appropriate freelist.  After yielding, the next
  8180   // free block encountered will start a coalescing range of
  8181   // free blocks.  If the next free block is adjacent to the
  8182   // chunk just flushed, they will need to wait for the next
  8183   // sweep to be coalesced.
  8184   if (inFreeRange()) {
  8185     flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
  8188   // First give up the locks, then yield, then re-lock.
  8189   // We should probably use a constructor/destructor idiom to
  8190   // do this unlock/lock or modify the MutexUnlocker class to
  8191   // serve our purpose. XXX
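  // A minimal sketch of such an idiom (hypothetical; this class does not
  // exist in HotSpot) might look like the following, with the body of the
  // yield placed inside the scope of one such stack object:
  //
  //   class ScopedSweepLocksUnlocker : public StackObj {
  //     Mutex* _bml;   // bit map lock
  //     Mutex* _fll;   // free list lock
  //    public:
  //     ScopedSweepLocksUnlocker(Mutex* bml, Mutex* fll) : _bml(bml), _fll(fll) {
  //       _bml->unlock();
  //       _fll->unlock();
  //     }
  //     ~ScopedSweepLocksUnlocker() {
  //       _fll->lock();
  //       _bml->lock_without_safepoint_check();
  //     }
  //   };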
  8192   assert_lock_strong(_bitMap->lock());
  8193   assert_lock_strong(_freelistLock);
  8194   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
  8195          "CMS thread should hold CMS token");
  8196   _bitMap->lock()->unlock();
  8197   _freelistLock->unlock();
  8198   ConcurrentMarkSweepThread::desynchronize(true);
  8199   ConcurrentMarkSweepThread::acknowledge_yield_request();
  8200   _collector->stopTimer();
  8201   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  8202   if (PrintCMSStatistics != 0) {
  8203     _collector->incrementYields();
  8205   _collector->icms_wait();
  8207   // See the comment in coordinator_yield()
  8208   for (unsigned i = 0; i < CMSYieldSleepCount &&
  8209                        ConcurrentMarkSweepThread::should_yield() &&
  8210                        !CMSCollector::foregroundGCIsActive(); ++i) {
  8211     os::sleep(Thread::current(), 1, false);
  8212     ConcurrentMarkSweepThread::acknowledge_yield_request();
  8215   ConcurrentMarkSweepThread::synchronize(true);
  8216   _freelistLock->lock();
  8217   _bitMap->lock()->lock_without_safepoint_check();
  8218   _collector->startTimer();
  8221 #ifndef PRODUCT
  8222 // This is actually very useful in a product build if it can
  8223 // be called from the debugger.  Compile it into the product
  8224 // as needed.
  8225 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
  8226   return debug_cms_space->verifyChunkInFreeLists(fc);
  8229 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
  8230   if (CMSTraceSweeper) {
  8231     gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
  8234 #endif
  8236 // CMSIsAliveClosure
  8237 bool CMSIsAliveClosure::do_object_b(oop obj) {
  8238   HeapWord* addr = (HeapWord*)obj;
  8239   return addr != NULL &&
  8240          (!_span.contains(addr) || _bit_map->isMarked(addr));
  8243 // CMSKeepAliveClosure: the serial version
  8244 void CMSKeepAliveClosure::do_oop(oop obj) {
  8245   HeapWord* addr = (HeapWord*)obj;
  8246   if (_span.contains(addr) &&
  8247       !_bit_map->isMarked(addr)) {
  8248     _bit_map->mark(addr);
  8249     bool simulate_overflow = false;
  8250     NOT_PRODUCT(
  8251       if (CMSMarkStackOverflowALot &&
  8252           _collector->simulate_overflow()) {
  8253         // simulate a stack overflow
  8254         simulate_overflow = true;
  8257     if (simulate_overflow || !_mark_stack->push(obj)) {
  8258       _collector->push_on_overflow_list(obj);
  8259       _collector->_ser_kac_ovflw++;
  8264 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  8265 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
  8267 // CMSParKeepAliveClosure: a parallel version of the above.
  8268 // The work queues are private to each closure (thread),
  8269 // but (may be) available for stealing by other threads.
  8270 void CMSParKeepAliveClosure::do_oop(oop obj) {
  8271   HeapWord* addr = (HeapWord*)obj;
  8272   if (_span.contains(addr) &&
  8273       !_bit_map->isMarked(addr)) {
  8274     // In general, during recursive tracing, several threads
  8275     // may be concurrently getting here; the first one to
  8276     // "tag" it, claims it.
  8277     if (_bit_map->par_mark(addr)) {
  8278       bool res = _work_queue->push(obj);
  8279       assert(res, "Low water mark should be much less than capacity");
  8280       // Do a recursive trim in the hope that this will keep
  8281       // stack usage lower, but leave some oops for potential stealers
  8282       trim_queue(_low_water_mark);
  8283     } // Else, another thread got there first
  8287 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  8288 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
  8290 void CMSParKeepAliveClosure::trim_queue(uint max) {
  8291   while (_work_queue->size() > max) {
  8292     oop new_oop;
  8293     if (_work_queue->pop_local(new_oop)) {
  8294       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
  8295       assert(_bit_map->isMarked((HeapWord*)new_oop),
  8296              "no white objects on this stack!");
  8297       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
  8298       // iterate over the oops in this oop, marking and pushing
  8299       // the ones in CMS heap (i.e. in _span).
  8300       new_oop->oop_iterate(&_mark_and_push);
  8305 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
  8306   HeapWord* addr = (HeapWord*)obj;
  8307   if (_span.contains(addr) &&
  8308       !_bit_map->isMarked(addr)) {
  8309     if (_bit_map->par_mark(addr)) {
  8310       bool simulate_overflow = false;
  8311       NOT_PRODUCT(
  8312         if (CMSMarkStackOverflowALot &&
  8313             _collector->par_simulate_overflow()) {
  8314           // simulate a stack overflow
  8315           simulate_overflow = true;
  8318       if (simulate_overflow || !_work_queue->push(obj)) {
  8319         _collector->par_push_on_overflow_list(obj);
  8320         _collector->_par_kac_ovflw++;
  8322     } // Else another thread got there already
  8326 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  8327 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  8329 //////////////////////////////////////////////////////////////////
  8330 //  CMSExpansionCause                /////////////////////////////
  8331 //////////////////////////////////////////////////////////////////
  8332 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
  8333   switch (cause) {
  8334     case _no_expansion:
  8335       return "No expansion";
  8336     case _satisfy_free_ratio:
  8337       return "Free ratio";
  8338     case _satisfy_promotion:
  8339       return "Satisfy promotion";
  8340     case _satisfy_allocation:
  8341       return "allocation";
  8342     case _allocate_par_lab:
  8343       return "Par LAB";
  8344     case _allocate_par_spooling_space:
  8345       return "Par Spooling Space";
  8346     case _adaptive_size_policy:
  8347       return "Ergonomics";
  8348     default:
  8349       return "unknown";
  8353 void CMSDrainMarkingStackClosure::do_void() {
  8354   // the max number to take from overflow list at a time
  8355   const size_t num = _mark_stack->capacity()/4;
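  // The loop below drains the local marking stack, and whenever the stack
  // runs dry it tries to refill it from the collector's overflow list;
  // it terminates only when both the stack and the overflow list are empty.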
  8356   while (!_mark_stack->isEmpty() ||
  8357          // if stack is empty, check the overflow list
  8358          _collector->take_from_overflow_list(num, _mark_stack)) {
  8359     oop obj = _mark_stack->pop();
  8360     HeapWord* addr = (HeapWord*)obj;
  8361     assert(_span.contains(addr), "Should be within span");
  8362     assert(_bit_map->isMarked(addr), "Should be marked");
  8363     assert(obj->is_oop(), "Should be an oop");
  8364     obj->oop_iterate(_keep_alive);
  8368 void CMSParDrainMarkingStackClosure::do_void() {
  8369   // drain queue
  8370   trim_queue(0);
  8373 // Trim our work_queue so its length is below max at return
  8374 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
  8375   while (_work_queue->size() > max) {
  8376     oop new_oop;
  8377     if (_work_queue->pop_local(new_oop)) {
  8378       assert(new_oop->is_oop(), "Expected an oop");
  8379       assert(_bit_map->isMarked((HeapWord*)new_oop),
  8380              "no white objects on this stack!");
  8381       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
  8382       // iterate over the oops in this oop, marking and pushing
  8383       // the ones in CMS heap (i.e. in _span).
  8384       new_oop->oop_iterate(&_mark_and_push);
  8389 ////////////////////////////////////////////////////////////////////
  8390 // Support for Marking Stack Overflow list handling and related code
  8391 ////////////////////////////////////////////////////////////////////
  8392 // Much of the following code is similar in shape and spirit to the
  8393 // code used in ParNewGC. We should try and share that code
  8394 // as much as possible in the future.
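//
// Note on representation: the overflow list is a singly-linked list threaded
// through the objects' mark words -- an object's mark word holds the next
// object on the list (NULL terminates it), and any mark word that must
// survive is saved beforehand via preserve_mark_if_necessary() or its
// par_ variant.  When an object is taken off the list, its mark word is
// reset to the prototypical value (see 'proto' in the routines below).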
  8396 #ifndef PRODUCT
  8397 // Debugging support for CMSStackOverflowALot
  8399 // It's OK to call this multi-threaded;  the worst thing
  8400 // that can happen is that we'll get a bunch of closely
  8401 // spaced simulated overflows, but that's OK, in fact
  8402 // probably good as it would exercise the overflow code
  8403 // under contention.
  8404 bool CMSCollector::simulate_overflow() {
  8405   if (_overflow_counter-- <= 0) { // just being defensive
  8406     _overflow_counter = CMSMarkStackOverflowInterval;
  8407     return true;
  8408   } else {
  8409     return false;
  8413 bool CMSCollector::par_simulate_overflow() {
  8414   return simulate_overflow();
  8416 #endif
  8418 // Single-threaded
  8419 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
  8420   assert(stack->isEmpty(), "Expected precondition");
  8421   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
  8422   size_t i = num;
  8423   oop  cur = _overflow_list;
  8424   const markOop proto = markOopDesc::prototype();
  8425   NOT_PRODUCT(size_t n = 0;)
  8426   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
  8427     next = oop(cur->mark());
  8428     cur->set_mark(proto);   // until proven otherwise
  8429     assert(cur->is_oop(), "Should be an oop");
  8430     bool res = stack->push(cur);
  8431     assert(res, "Bit off more than can chew?");
  8432     NOT_PRODUCT(n++;)
  8434   _overflow_list = cur;
  8435 #ifndef PRODUCT
  8436   assert(_num_par_pushes >= n, "Too many pops?");
  8437   _num_par_pushes -= n;
  8438 #endif
  8439   return !stack->isEmpty();
  8442 // Multi-threaded; use CAS to break off a prefix
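// The approach: atomically exchange the global list head with NULL to claim
// the whole list, walk the first 'num' elements to find the split point,
// re-attach any remaining suffix to the global list with a CAS loop, and
// finally push the detached prefix onto the caller's work queue, restoring
// each element's prototypical mark word as it is pushed.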
  8443 bool CMSCollector::par_take_from_overflow_list(size_t num,
  8444                                                OopTaskQueue* work_q) {
  8445   assert(work_q->size() == 0, "That's the current policy");
  8446   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
  8447   if (_overflow_list == NULL) {
  8448     return false;
  8450   // Grab the entire list; we'll put back a suffix
  8451   oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
  8452   if (prefix == NULL) {  // someone grabbed it before we did ...
  8453     // ... we could spin for a short while, but for now we don't
  8454     return false;
  8456   size_t i = num;
  8457   oop cur = prefix;
  8458   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
  8459   if (cur->mark() != NULL) {
  8460     oop suffix_head = cur->mark(); // suffix will be put back on global list
  8461     cur->set_mark(NULL);           // break off suffix
  8462     // Find tail of suffix so we can prepend suffix to global list
  8463     for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
  8464     oop suffix_tail = cur;
  8465     assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
  8466            "Tautology");
  8467     oop observed_overflow_list = _overflow_list;
  8468     do {
  8469       cur = observed_overflow_list;
  8470       suffix_tail->set_mark(markOop(cur));
  8471       observed_overflow_list =
  8472         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
  8473     } while (cur != observed_overflow_list);
  8476   // Push the prefix elements on work_q
  8477   assert(prefix != NULL, "control point invariant");
  8478   const markOop proto = markOopDesc::prototype();
  8479   oop next;
  8480   NOT_PRODUCT(size_t n = 0;)
  8481   for (cur = prefix; cur != NULL; cur = next) {
  8482     next = oop(cur->mark());
  8483     cur->set_mark(proto);   // until proven otherwise
  8484     assert(cur->is_oop(), "Should be an oop");
  8485     bool res = work_q->push(cur);
  8486     assert(res, "Bit off more than we can chew?");
  8487     NOT_PRODUCT(n++;)
  8489 #ifndef PRODUCT
  8490   assert(_num_par_pushes >= n, "Too many pops?");
  8491   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
  8492 #endif
  8493   return true;
  8496 // Single-threaded
  8497 void CMSCollector::push_on_overflow_list(oop p) {
  8498   NOT_PRODUCT(_num_par_pushes++;)
  8499   assert(p->is_oop(), "Not an oop");
  8500   preserve_mark_if_necessary(p);
  8501   p->set_mark((markOop)_overflow_list);
  8502   _overflow_list = p;
  8505 // Multi-threaded; use CAS to prepend to overflow list
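// Classic lock-free prepend: link the new element to the currently observed
// list head, then retry the compare-and-swap until no other thread has
// changed the head in the meantime.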
  8506 void CMSCollector::par_push_on_overflow_list(oop p) {
  8507   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
  8508   assert(p->is_oop(), "Not an oop");
  8509   par_preserve_mark_if_necessary(p);
  8510   oop observed_overflow_list = _overflow_list;
  8511   oop cur_overflow_list;
  8512   do {
  8513     cur_overflow_list = observed_overflow_list;
  8514     p->set_mark(markOop(cur_overflow_list));
  8515     observed_overflow_list =
  8516       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
  8517   } while (cur_overflow_list != observed_overflow_list);
  8520 // Single threaded
  8521 // General Note on GrowableArray: pushes may silently fail
  8522 // because we are (temporarily) out of C-heap for expanding
  8523 // the stack. The problem is quite ubiquitous and affects
  8524 // a lot of code in the JVM. The prudent thing for GrowableArray
  8525 // to do (for now) is to exit with an error. However, that may
  8526 // be too draconian in some cases because the caller may be
  8527 // able to recover without much harm. For such cases, we
  8528 // should probably introduce a "soft_push" method which returns
  8529 // an indication of success or failure with the assumption that
  8530 // the caller may be able to recover from a failure; code in
  8531 // the VM can then be changed, incrementally, to deal with such
  8532 // failures where possible, thus, incrementally hardening the VM
  8533 // in such low resource situations.
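//
// A possible shape for such a method (purely hypothetical -- GrowableArray
// has no soft_push today, and try_grow() below is an assumed helper that
// would attempt the C-heap expansion and report failure instead of exiting):
//
//   template <class E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;              // caller decides how to recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }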
  8534 void CMSCollector::preserve_mark_work(oop p, markOop m) {
  8535   int PreserveMarkStackSize = 128;
  8537   if (_preserved_oop_stack == NULL) {
  8538     assert(_preserved_mark_stack == NULL,
  8539            "bijection with preserved_oop_stack");
  8540     // Allocate the stacks
  8541     _preserved_oop_stack  = new (ResourceObj::C_HEAP)
  8542       GrowableArray<oop>(PreserveMarkStackSize, true);
  8543     _preserved_mark_stack = new (ResourceObj::C_HEAP)
  8544       GrowableArray<markOop>(PreserveMarkStackSize, true);
  8545     if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
  8546       vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
  8547                             "Preserved Mark/Oop Stack for CMS (C-heap)");
  8550   _preserved_oop_stack->push(p);
  8551   _preserved_mark_stack->push(m);
  8552   assert(m == p->mark(), "Mark word changed");
  8553   assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
  8554          "bijection");
  8557 // Single threaded
  8558 void CMSCollector::preserve_mark_if_necessary(oop p) {
  8559   markOop m = p->mark();
  8560   if (m->must_be_preserved(p)) {
  8561     preserve_mark_work(p, m);
  8565 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
  8566   markOop m = p->mark();
  8567   if (m->must_be_preserved(p)) {
  8568     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  8569     // Even though we read the mark word without holding
  8570     // the lock, we are assured that it will not change
  8571     // because we "own" this oop, so no other thread can
  8572     // be trying to push it on the overflow list; see
  8573     // the assertion in preserve_mark_work() that checks
  8574     // that m == p->mark().
  8575     preserve_mark_work(p, m);
  8579 // We should be able to do this multi-threaded,
  8580 // a chunk of stack being a task (this is
  8581 // correct because each oop only ever appears
  8582 // once in the overflow list). However, it's
  8583 // not very easy to completely overlap this with
  8584 // other operations, so will generally not be done
  8585 // until all work's been completed. Because we
  8586 // expect the preserved oop stack (set) to be small,
  8587 // it's probably fine to do this single-threaded.
  8588 // We can explore cleverer concurrent/overlapped/parallel
  8589 // processing of preserved marks if we feel the
  8590 // need for this in the future. Stack overflow should
  8591 // be so rare in practice and, when it happens, its
  8592 // effect on performance so great that this will
  8593 // likely just be in the noise anyway.
  8594 void CMSCollector::restore_preserved_marks_if_any() {
  8595   if (_preserved_oop_stack == NULL) {
  8596     assert(_preserved_mark_stack == NULL,
  8597            "bijection with preserved_oop_stack");
  8598     return;
  8601   assert(SafepointSynchronize::is_at_safepoint(),
  8602          "world should be stopped");
  8603   assert(Thread::current()->is_ConcurrentGC_thread() ||
  8604          Thread::current()->is_VM_thread(),
  8605          "should be single-threaded");
  8607   int length = _preserved_oop_stack->length();
  8608   assert(_preserved_mark_stack->length() == length, "bijection");
  8609   for (int i = 0; i < length; i++) {
  8610     oop p = _preserved_oop_stack->at(i);
  8611     assert(p->is_oop(), "Should be an oop");
  8612     assert(_span.contains(p), "oop should be in _span");
  8613     assert(p->mark() == markOopDesc::prototype(),
  8614            "Set when taken from overflow list");
  8615     markOop m = _preserved_mark_stack->at(i);
  8616     p->set_mark(m);
  8618   _preserved_mark_stack->clear();
  8619   _preserved_oop_stack->clear();
  8620   assert(_preserved_mark_stack->is_empty() &&
  8621          _preserved_oop_stack->is_empty(),
  8622          "stacks were cleared above");
  8625 #ifndef PRODUCT
  8626 bool CMSCollector::no_preserved_marks() const {
  8627   return (   (   _preserved_mark_stack == NULL
  8628               && _preserved_oop_stack == NULL)
  8629           || (   _preserved_mark_stack->is_empty()
  8630               && _preserved_oop_stack->is_empty()));
  8632 #endif
  8634 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
  8636   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
  8637   CMSAdaptiveSizePolicy* size_policy =
  8638     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
  8639   assert(size_policy->is_gc_cms_adaptive_size_policy(),
  8640     "Wrong type for size policy");
  8641   return size_policy;
  8644 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
  8645                                            size_t desired_promo_size) {
  8646   if (cur_promo_size < desired_promo_size) {
  8647     size_t expand_bytes = desired_promo_size - cur_promo_size;
  8648     if (PrintAdaptiveSizePolicy && Verbose) {
  8649       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
  8650         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
  8651         expand_bytes);
  8653     expand(expand_bytes,
  8654            MinHeapDeltaBytes,
  8655            CMSExpansionCause::_adaptive_size_policy);
  8656   } else if (desired_promo_size < cur_promo_size) {
  8657     size_t shrink_bytes = cur_promo_size - desired_promo_size;
  8658     if (PrintAdaptiveSizePolicy && Verbose) {
  8659       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
  8660         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
  8661         shrink_bytes);
  8663     shrink(shrink_bytes);
  8667 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
  8668   GenCollectedHeap* gch = GenCollectedHeap::heap();
  8669   CMSGCAdaptivePolicyCounters* counters =
  8670     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
  8671   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
  8672     "Wrong kind of counters");
  8673   return counters;
  8677 void ASConcurrentMarkSweepGeneration::update_counters() {
  8678   if (UsePerfData) {
  8679     _space_counters->update_all();
  8680     _gen_counters->update_all();
  8681     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
  8682     GenCollectedHeap* gch = GenCollectedHeap::heap();
  8683     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
  8684     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
  8685       "Wrong gc statistics type");
  8686     counters->update_counters(gc_stats_l);
  8690 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
  8691   if (UsePerfData) {
  8692     _space_counters->update_used(used);
  8693     _space_counters->update_capacity();
  8694     _gen_counters->update_all();
  8696     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
  8697     GenCollectedHeap* gch = GenCollectedHeap::heap();
  8698     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
  8699     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
  8700       "Wrong gc statistics type");
  8701     counters->update_counters(gc_stats_l);
  8705 // The desired expansion delta is computed so that:
  8706 // . desired free percentage or greater is used
  8707 void ASConcurrentMarkSweepGeneration::compute_new_size() {
  8708   assert_locked_or_safepoint(Heap_lock);
  8710   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
  8712   // If incremental collection failed, we just want to expand
  8713   // to the limit.
  8714   if (incremental_collection_failed()) {
  8715     clear_incremental_collection_failed();
  8716     grow_to_reserved();
  8717     return;
  8720   assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
  8722   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
  8723     "Wrong type of heap");
  8724   int prev_level = level() - 1;
  8725   assert(prev_level >= 0, "The cms generation is the lowest generation");
  8726   Generation* prev_gen = gch->get_gen(prev_level);
  8727   assert(prev_gen->kind() == Generation::ASParNew,
  8728     "Wrong type of young generation");
  8729   ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
  8730   size_t cur_eden = younger_gen->eden()->capacity();
  8731   CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
  8732   size_t cur_promo = free();
  8733   size_policy->compute_tenured_generation_free_space(cur_promo,
  8734                                                        max_available(),
  8735                                                        cur_eden);
  8736   resize(cur_promo, size_policy->promo_size());
  8738   // Record the new size of the space in the cms generation
  8739   // that is available for promotions.  This is temporary.
  8740   // It should be the desired promo size.
  8741   size_policy->avg_cms_promo()->sample(free());
  8742   size_policy->avg_old_live()->sample(used());
  8744   if (UsePerfData) {
  8745     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
  8746     counters->update_cms_capacity_counter(capacity());
  8750 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
  8751   assert_locked_or_safepoint(Heap_lock);
  8752   assert_lock_strong(freelistLock());
  8753   HeapWord* old_end = _cmsSpace->end();
  8754   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
  8755   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
  8756   FreeChunk* chunk_at_end = find_chunk_at_end();
  8757   if (chunk_at_end == NULL) {
  8758     // No room to shrink
  8759     if (PrintGCDetails && Verbose) {
  8760       gclog_or_tty->print_cr("No room to shrink: old_end  "
  8761         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
  8762         " chunk_at_end  " PTR_FORMAT,
  8763         old_end, unallocated_start, chunk_at_end);
  8765     return;
  8766   } else {
  8768     // Find the chunk at the end of the space and determine
  8769     // how much it can be shrunk.
  8770     size_t shrinkable_size_in_bytes = chunk_at_end->size();
  8771     size_t aligned_shrinkable_size_in_bytes =
  8772       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
  8773     assert(unallocated_start <= chunk_at_end->end(),
  8774       "Inconsistent chunk at end of space");
  8775     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
  8776     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
  8778     // Shrink the underlying space
  8779     _virtual_space.shrink_by(bytes);
  8780     if (PrintGCDetails && Verbose) {
  8781       gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
  8782         " desired_bytes " SIZE_FORMAT
  8783         " shrinkable_size_in_bytes " SIZE_FORMAT
  8784         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
  8785         "  bytes  " SIZE_FORMAT,
  8786         desired_bytes, shrinkable_size_in_bytes,
  8787         aligned_shrinkable_size_in_bytes, bytes);
  8788       gclog_or_tty->print_cr("          old_end  " SIZE_FORMAT
  8789         "  unallocated_start  " SIZE_FORMAT,
  8790         old_end, unallocated_start);
  8793     // If the space did shrink (shrinking is not guaranteed),
  8794     // shrink the chunk at the end by the appropriate amount.
  8795     if (((HeapWord*)_virtual_space.high()) < old_end) {
  8796       size_t new_word_size =
  8797         heap_word_size(_virtual_space.committed_size());
  8799       // Have to remove the chunk from the dictionary because it is changing
  8800       // size and might end up somewhere else in the dictionary.
  8802       // Get the chunk at end, shrink it, and put it
  8803       // back.
  8804       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
  8805       size_t word_size_change = word_size_before - new_word_size;
  8806       size_t chunk_at_end_old_size = chunk_at_end->size();
  8807       assert(chunk_at_end_old_size >= word_size_change,
  8808         "Shrink is too large");
  8809       chunk_at_end->setSize(chunk_at_end_old_size -
  8810                           word_size_change);
  8811       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
  8812         word_size_change);
  8814       _cmsSpace->returnChunkToDictionary(chunk_at_end);
  8816       MemRegion mr(_cmsSpace->bottom(), new_word_size);
  8817       _bts->resize(new_word_size);  // resize the block offset shared array
  8818       Universe::heap()->barrier_set()->resize_covered_region(mr);
  8819       _cmsSpace->assert_locked();
  8820       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
  8822       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
  8824       // update the space and generation capacity counters
  8825       if (UsePerfData) {
  8826         _space_counters->update_capacity();
  8827         _gen_counters->update_all();
  8830       if (Verbose && PrintGCDetails) {
  8831         size_t new_mem_size = _virtual_space.committed_size();
  8832         size_t old_mem_size = new_mem_size + bytes;
  8833         gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
  8834                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
  8838     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
  8839       "Inconsistency at end of space");
  8840     assert(chunk_at_end->end() == _cmsSpace->end(),
  8841       "Shrinking is inconsistent");
  8842     return;
  8846 // Transfer some number of overflowed objects to the usual marking
  8847 // stack. Return true if some objects were transferred.
  8848 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
  8849   size_t num = MIN2((size_t)_mark_stack->capacity()/4,
  8850                     (size_t)ParGCDesiredObjsFromOverflowList);
  8852   bool res = _collector->take_from_overflow_list(num, _mark_stack);
  8853   assert(_collector->overflow_list_is_empty() || res,
  8854          "If list is not empty, we should have taken something");
  8855   assert(!res || !_mark_stack->isEmpty(),
  8856          "If we took something, it should now be on our stack");
  8857   return res;
  8860 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
  8861   size_t res = _sp->block_size_no_stall(addr, _collector);
  8862   assert(res != 0, "Should always be able to compute a size");
  8863   if (_sp->block_is_obj(addr)) {
  8864     if (_live_bit_map->isMarked(addr)) {
  8865       // It can't have been dead in a previous cycle
  8866       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
  8867     } else {
  8868       _dead_bit_map->mark(addr);      // mark the dead object
  8871   return res;
