src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

author       ysr
date         Thu, 20 Nov 2008 16:56:09 -0800
changeset    888:c96030fff130
parent       887:00b023ae2d78
child        952:e9be0e04635a
permissions  -rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
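
A hedged sketch of the idea (illustrative only; the struct and field names below are assumptions, not the actual ReferencePolicy API): under an LRU-style soft-ref clearing policy, the "will not be cleared" test depends only on values already known at marking time, so such referents can be marked through immediately, leaving only the "may be cleared" cases for the final, non-concurrent reference processing phase.

    // Illustrative sketch, not the HotSpot implementation.
    struct ExampleLRUSoftRefPolicy {
      long clock_ms;        // current soft-ref clock reading
      long ms_per_free_mb;  // cf. SoftRefLRUPolicyMSPerMB
      long free_heap_mb;    // free space used to scale the retention window

      // True if a softly reachable referent with this timestamp will
      // certainly NOT be cleared during the current cycle.
      bool will_not_clear(long timestamp_ms) const {
        return (clock_ms - timestamp_ms) <= ms_per_free_mb * free_heap_mb;
      }
    };
    // During concurrent marking: if will_not_clear(ts) holds, the referent
    // is treated as reachable and marked through right away; otherwise the
    // final clear/keep decision is deferred to reference processing.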

     1 /*
     2  * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // ConcurrentMarkSweepGeneration is in support of a concurrent
    26 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
    27 // style. We assume, for now, that this generation is always the
    28 // seniormost generation (modulo the PermGeneration), and for simplicity
    29 // in the first implementation, that this generation is a single compactible
    30 // space. Neither of these restrictions appears essential; they will be
    31 // relaxed in the future, when more time is available to implement the
    32 // greater generality (and there's a need for it).
    33 //
    34 // Concurrent mode failures are currently handled by
    35 // means of a sliding mark-compact.
    37 class CMSAdaptiveSizePolicy;
    38 class CMSConcMarkingTask;
    39 class CMSGCAdaptivePolicyCounters;
    40 class ConcurrentMarkSweepGeneration;
    41 class ConcurrentMarkSweepPolicy;
    42 class ConcurrentMarkSweepThread;
    43 class CompactibleFreeListSpace;
    44 class FreeChunk;
    45 class PromotionInfo;
    46 class ScanMarkedObjectsAgainCarefullyClosure;
    48 // A generic CMS bit map. It's the basis for both the CMS marking bit map
    49 // as well as for the mod union table (in each case only a subset of the
    50 // methods are used). This is essentially a wrapper around the BitMap class,
    51 // with one bit per (1 << _shifter) HeapWords (i.e., for the marking bit map
    52 // we have _shifter == 0, and for the mod union table we have
    53 // _shifter == CardTableModRefBS::card_shift - LogHeapWordSize).
    54 // XXX 64-bit issues in BitMap?
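// A hedged sketch (illustrative free helpers, not members of this header) of
// the address<->bit arithmetic implied by _shifter: each bit covers
// (1 << _shifter) HeapWords, measured from the base of the covered range.
inline size_t example_heap_word_to_bit(HeapWord* addr, HeapWord* base,
                                       int shifter) {
  return (size_t)(addr - base) >> shifter;   // word distance -> bit index
}
inline HeapWord* example_bit_to_heap_word(size_t bit, HeapWord* base,
                                          int shifter) {
  return base + (bit << shifter);            // bit index -> first covered word
}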
    55 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
    56   friend class VMStructs;
    58   HeapWord* _bmStartWord;   // base address of range covered by map
    59   size_t    _bmWordSize;    // map size (in #HeapWords covered)
    60   const int _shifter;       // shifts to convert HeapWord to bit position
    61   VirtualSpace _virtual_space; // underlying the bit map
    62   BitMap    _bm;            // the bit map itself
    63  public:
    64   Mutex* const _lock;       // mutex protecting _bm;
    66  public:
    67   // constructor
    68   CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
    70   // allocates the actual storage for the map
    71   bool allocate(MemRegion mr);
    72   // field getter
    73   Mutex* lock() const { return _lock; }
    74   // locking verifier convenience function
    75   void assert_locked() const PRODUCT_RETURN;
    77   // inquiries
    78   HeapWord* startWord()   const { return _bmStartWord; }
    79   size_t    sizeInWords() const { return _bmWordSize;  }
    80   size_t    sizeInBits()  const { return _bm.size();   }
    81   // the following is one past the last word in space
    82   HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
    84   // reading marks
    85   bool isMarked(HeapWord* addr) const;
    86   bool par_isMarked(HeapWord* addr) const; // does not do locking checks
    87   bool isUnmarked(HeapWord* addr) const;
    88   bool isAllClear() const;
    90   // writing marks
    91   void mark(HeapWord* addr);
    92   // For marking by parallel GC threads;
    93   // returns true if we did, false if another thread did
    94   bool par_mark(HeapWord* addr);
    96   void mark_range(MemRegion mr);
    97   void par_mark_range(MemRegion mr);
    98   void mark_large_range(MemRegion mr);
    99   void par_mark_large_range(MemRegion mr);
   100   void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
   101   void clear_range(MemRegion mr);
   102   void par_clear_range(MemRegion mr);
   103   void clear_large_range(MemRegion mr);
   104   void par_clear_large_range(MemRegion mr);
   105   void clear_all();
   106   void clear_all_incrementally();  // Not yet implemented!!
   108   NOT_PRODUCT(
   109     // checks the memory region for validity
   110     void region_invariant(MemRegion mr);
   111   )
   113   // iteration
   114   void iterate(BitMapClosure* cl) {
   115     _bm.iterate(cl);
   116   }
   117   void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
   118   void dirty_range_iterate_clear(MemRegionClosure* cl);
   119   void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
   121   // auxiliary support for iteration
   122   HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
   123   HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
   124                                             HeapWord* end_addr) const;
   125   HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
   126   HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
   127                                               HeapWord* end_addr) const;
   128   MemRegion getAndClearMarkedRegion(HeapWord* addr);
   129   MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
   130                                            HeapWord* end_addr);
   132   // conversion utilities
   133   HeapWord* offsetToHeapWord(size_t offset) const;
   134   size_t    heapWordToOffset(HeapWord* addr) const;
   135   size_t    heapWordDiffToOffsetDiff(size_t diff) const;
   137   // debugging
   138   // is this address range covered by the bit-map?
   139   NOT_PRODUCT(
   140     bool covers(MemRegion mr) const;
   141     bool covers(HeapWord* start, size_t size = 0) const;
   142   )
   143   void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
   144 };
   146 // Represents a marking stack used by the CMS collector.
   147 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
   148 class CMSMarkStack: public CHeapObj  {
   149   //
   150   friend class CMSCollector;   // to get at expansion stats further below
   151   //
   153   VirtualSpace _virtual_space;  // space for the stack
   154   oop*   _base;      // bottom of stack
   155   size_t _index;     // one more than last occupied index
   156   size_t _capacity;  // max #elements
   157   Mutex  _par_lock;  // an advisory lock used in case of parallel access
   158   NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
   160  protected:
   161   size_t _hit_limit;      // we hit max stack size limit
   162   size_t _failed_double;  // we failed expansion before hitting limit
   164  public:
   165   CMSMarkStack():
   166     _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
   167     _hit_limit(0),
   168     _failed_double(0) {}
   170   bool allocate(size_t size);
   172   size_t capacity() const { return _capacity; }
   174   oop pop() {
   175     if (!isEmpty()) {
   176       return _base[--_index] ;
   177     }
   178     return NULL;
   179   }
   181   bool push(oop ptr) {
   182     if (isFull()) {
   183       return false;
   184     } else {
   185       _base[_index++] = ptr;
   186       NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
   187       return true;
   188     }
   189   }
   191   bool isEmpty() const { return _index == 0; }
   192   bool isFull()  const {
   193     assert(_index <= _capacity, "buffer overflow");
   194     return _index == _capacity;
   195   }
   197   size_t length() { return _index; }
   199   // "Parallel versions" of some of the above
   200   oop par_pop() {
   201     // lock and pop
   202     MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
   203     return pop();
   204   }
   206   bool par_push(oop ptr) {
   207     // lock and push
   208     MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
   209     return push(ptr);
   210   }
   212   // Forcibly reset the stack, losing all of its contents.
   213   void reset() {
   214     _index = 0;
   215   }
   217   // Expand the stack, typically in response to an overflow condition
   218   void expand();
   220   // Compute the least valued stack element.
   221   oop least_value(HeapWord* low) {
   222      oop least = (oop)low;
   223      for (size_t i = 0; i < _index; i++) {
   224        least = MIN2(least, _base[i]);
   225      }
   226      return least;
   227   }
   229   // Exposed here to allow stack expansion in || case
   230   Mutex* par_lock() { return &_par_lock; }
   231 };
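// A hedged usage sketch for the class above (the fallback in the final
// branch is hypothetical): push() fails when the stack is full, at which
// point a caller may expand() under the parallel lock and retry, or divert
// the object to an overflow list.
inline void example_push_with_expansion(CMSMarkStack* stack, oop obj) {
  if (stack->push(obj)) return;        // common case: room available
  {
    MutexLockerEx x(stack->par_lock(), Mutex::_no_safepoint_check_flag);
    stack->expand();                   // may be a no-op if already at the limit
  }
  if (!stack->push(obj)) {
    // Still full: record the overflow and handle the object later,
    // e.g. via an overflow list (hypothetical here).
  }
}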
   233 class CardTableRS;
   234 class CMSParGCThreadState;
   236 class ModUnionClosure: public MemRegionClosure {
   237  protected:
   238   CMSBitMap* _t;
   239  public:
   240   ModUnionClosure(CMSBitMap* t): _t(t) { }
   241   void do_MemRegion(MemRegion mr);
   242 };
   244 class ModUnionClosurePar: public ModUnionClosure {
   245  public:
   246   ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
   247   void do_MemRegion(MemRegion mr);
   248 };
   250 // Survivor Chunk Array in support of parallelization of
   251 // Survivor Space rescan.
   252 class ChunkArray: public CHeapObj {
   253   size_t _index;
   254   size_t _capacity;
   255   HeapWord** _array;   // storage for array
   257  public:
   258   ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
   259   ChunkArray(HeapWord** a, size_t c):
   260     _index(0), _capacity(c), _array(a) {}
   262   HeapWord** array() { return _array; }
   263   void set_array(HeapWord** a) { _array = a; }
   265   size_t capacity() { return _capacity; }
   266   void set_capacity(size_t c) { _capacity = c; }
   268   size_t end() {
   269     assert(_index < capacity(), "_index out of bounds");
   270     return _index;
   271   }  // exclusive
   273   HeapWord* nth(size_t n) {
   274     assert(n < end(), "Out of bounds access");
   275     return _array[n];
   276   }
   278   void reset() {
   279     _index = 0;
   280   }
   282   void record_sample(HeapWord* p, size_t sz) {
   283     // For now we do not do anything with the size
   284     if (_index < _capacity) {
   285       _array[_index++] = p;
   286     }
   287   }
   288 };
   290 //
   291 // Timing, allocation and promotion statistics for gc scheduling and incremental
   292 // mode pacing.  Most statistics are exponential averages.
   293 //
   294 class CMSStats VALUE_OBJ_CLASS_SPEC {
   295  private:
   296   ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.
   298   // The following are exponential averages with factor alpha:
   299   //   avg = (100 - alpha) * avg + alpha * cur_sample
   300   //
   301   //   The durations measure:  end_time[n] - start_time[n]
   302   //   The periods measure:    start_time[n] - start_time[n-1]
   303   //
   304   // The cms period and duration include only concurrent collections; time spent
   305   // in foreground cms collections due to System.gc() or because of a failure to
   306   // keep up are not included.
   307   //
   308   // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
   309   // real value, but is used only after the first period.  A value of 100 is
   310   // used for the first sample so it gets the entire weight.
   311   unsigned int _saved_alpha; // 0-100
   312   unsigned int _gc0_alpha;
   313   unsigned int _cms_alpha;
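  // A hedged worked example of the averaging rule quoted above (illustrative
  // helper, not used elsewhere; since alpha is a percentage, the weighted sum
  // is assumed to be normalized by 100, so an alpha of 100 makes the first
  // sample carry the entire weight):
  static double example_exp_avg(double avg, double cur_sample,
                                unsigned int alpha /* 0-100 */) {
    return ((100.0 - alpha) * avg + alpha * cur_sample) / 100.0;
  }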
   315   double _gc0_duration;
   316   double _gc0_period;
   317   size_t _gc0_promoted;         // bytes promoted per gc0
   318   double _cms_duration;
   319   double _cms_duration_pre_sweep; // time from initiation to start of sweep
   320   double _cms_duration_per_mb;
   321   double _cms_period;
   322   size_t _cms_allocated;        // bytes of direct allocation per gc0 period
   324   // Timers.
   325   elapsedTimer _cms_timer;
   326   TimeStamp    _gc0_begin_time;
   327   TimeStamp    _cms_begin_time;
   328   TimeStamp    _cms_end_time;
   330   // Snapshots of the amount used in the CMS generation.
   331   size_t _cms_used_at_gc0_begin;
   332   size_t _cms_used_at_gc0_end;
   333   size_t _cms_used_at_cms_begin;
   335   // Used to prevent the duty cycle from being reduced in the middle of a cms
   336   // cycle.
   337   bool _allow_duty_cycle_reduction;
   339   enum {
   340     _GC0_VALID = 0x1,
   341     _CMS_VALID = 0x2,
   342     _ALL_VALID = _GC0_VALID | _CMS_VALID
   343   };
   345   unsigned int _valid_bits;
   347   unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).
   349  protected:
   351   // Return a duty cycle that avoids wild oscillations, by limiting the amount
   352   // of change between old_duty_cycle and new_duty_cycle (the latter is treated
   353   // as a recommended value).
   354   static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
   355                                              unsigned int new_duty_cycle);
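  // A hedged sketch of one possible damping scheme (an assumption, not
  // necessarily what icms_damped_duty_cycle() actually does): clamp how far
  // the duty cycle may move in a single update.
  static unsigned int example_damped_duty_cycle(unsigned int old_dc,
                                                unsigned int new_dc,
                                                unsigned int max_step) {
    if (new_dc > old_dc && new_dc - old_dc > max_step) return old_dc + max_step;
    if (old_dc > new_dc && old_dc - new_dc > max_step) return old_dc - max_step;
    return new_dc;
  }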
   356   unsigned int icms_update_duty_cycle_impl();
   358  public:
   359   CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
   360            unsigned int alpha = CMSExpAvgFactor);
   362   // Whether or not the statistics contain valid data; higher level statistics
   363   // cannot be called until this returns true (they require at least one young
   364   // gen and one cms cycle to have completed).
   365   bool valid() const;
   367   // Record statistics.
   368   void record_gc0_begin();
   369   void record_gc0_end(size_t cms_gen_bytes_used);
   370   void record_cms_begin();
   371   void record_cms_end();
   373   // Allow management of the cms timer, which must be stopped/started around
   374   // yield points.
   375   elapsedTimer& cms_timer()     { return _cms_timer; }
   376   void start_cms_timer()        { _cms_timer.start(); }
   377   void stop_cms_timer()         { _cms_timer.stop(); }
   379   // Basic statistics; units are seconds or bytes.
   380   double gc0_period() const     { return _gc0_period; }
   381   double gc0_duration() const   { return _gc0_duration; }
   382   size_t gc0_promoted() const   { return _gc0_promoted; }
   383   double cms_period() const          { return _cms_period; }
   384   double cms_duration() const        { return _cms_duration; }
   385   double cms_duration_per_mb() const { return _cms_duration_per_mb; }
   386   size_t cms_allocated() const       { return _cms_allocated; }
   388   size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
   390   // Seconds since the last background cms cycle began or ended.
   391   double cms_time_since_begin() const;
   392   double cms_time_since_end() const;
   394   // Higher level statistics--caller must check that valid() returns true before
   395   // calling.
   397   // Returns bytes promoted per second of wall clock time.
   398   double promotion_rate() const;
   400   // Returns bytes directly allocated per second of wall clock time.
   401   double cms_allocation_rate() const;
   403   // Rate at which space in the cms generation is being consumed (sum of the
   404   // above two).
   405   double cms_consumption_rate() const;
   407   // Returns an estimate of the number of seconds until the cms generation will
   408   // fill up, assuming no collection work is done.
   409   double time_until_cms_gen_full() const;
   411   // Returns an estimate of the number of seconds remaining until
   412   // the cms generation collection should start.
   413   double time_until_cms_start() const;
   415   // End of higher level statistics.
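  // Hedged sketches of how the estimates above relate (illustrative helpers;
  // the real members compute from the recorded statistics directly):
  // consumption rate is the sum of promotion and direct allocation rates, and
  // time-until-full is free space divided by that rate, assuming no GC runs.
  static double example_consumption_rate(double promotion_rate,
                                         double allocation_rate) {
    return promotion_rate + allocation_rate;            // bytes per second
  }
  static double example_time_until_full(size_t cms_free_bytes,
                                        double consumption_rate) {
    return consumption_rate > 0.0 ? (double)cms_free_bytes / consumption_rate
                                  : 1.0e10;             // effectively "never"
  }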
   417   // Returns the cms incremental mode duty cycle, as a percentage (0-100).
   418   unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
   420   // Update the duty cycle and return the new value.
   421   unsigned int icms_update_duty_cycle();
   423   // Debugging.
   424   void print_on(outputStream* st) const PRODUCT_RETURN;
   425   void print() const { print_on(gclog_or_tty); }
   426 };
   428 // A closure related to weak references processing which
   429 // we embed in the CMSCollector, since we need to pass
   430 // it to the reference processor for secondary filtering
   431 // of references based on reachability of referent;
   432 // see role of _is_alive_non_header closure in the
   433 // ReferenceProcessor class.
   434 // For objects in the CMS generation, this closure checks
   435 // if the object is "live" (reachable). Used in weak
   436 // reference processing.
   437 class CMSIsAliveClosure: public BoolObjectClosure {
   438   const MemRegion  _span;
   439   const CMSBitMap* _bit_map;
   441   friend class CMSCollector;
   442  public:
   443   CMSIsAliveClosure(MemRegion span,
   444                     CMSBitMap* bit_map):
   445     _span(span),
   446     _bit_map(bit_map) {
   447     assert(!span.is_empty(), "Empty span could spell trouble");
   448   }
   450   void do_object(oop obj) {
   451     assert(false, "not to be invoked");
   452   }
   454   bool do_object_b(oop obj);
   455 };
   458 // Implements AbstractRefProcTaskExecutor for CMS.
   459 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
   460 public:
   462   CMSRefProcTaskExecutor(CMSCollector& collector)
   463     : _collector(collector)
   464   { }
   466   // Executes a task using worker threads.
   467   virtual void execute(ProcessTask& task);
   468   virtual void execute(EnqueueTask& task);
   469 private:
   470   CMSCollector& _collector;
   471 };
   474 class CMSCollector: public CHeapObj {
   475   friend class VMStructs;
   476   friend class ConcurrentMarkSweepThread;
   477   friend class ConcurrentMarkSweepGeneration;
   478   friend class CompactibleFreeListSpace;
   479   friend class CMSParRemarkTask;
   480   friend class CMSConcMarkingTask;
   481   friend class CMSRefProcTaskProxy;
   482   friend class CMSRefProcTaskExecutor;
   483   friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
   484   friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
   485   friend class PushOrMarkClosure;             // to access _restart_addr
   486   friend class Par_PushOrMarkClosure;             // to access _restart_addr
   487   friend class MarkFromRootsClosure;          //  -- ditto --
   488                                               // ... and for clearing cards
   489   friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
   490                                               // ... and for clearing cards
   491   friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
   492   friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
   493   friend class PushAndMarkVerifyClosure;      //  -- ditto --
   494   friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
   495   friend class PushAndMarkClosure;            //  -- ditto --
   496   friend class Par_PushAndMarkClosure;        //  -- ditto --
   497   friend class CMSKeepAliveClosure;           //  -- ditto --
   498   friend class CMSDrainMarkingStackClosure;   //  -- ditto --
   499   friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
   500   NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
   501   friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
   502   friend class VM_CMS_Operation;
   503   friend class VM_CMS_Initial_Mark;
   504   friend class VM_CMS_Final_Remark;
   506  private:
   507   jlong _time_of_last_gc;
   508   void update_time_of_last_gc(jlong now) {
   509     _time_of_last_gc = now;
   510   }
   512   OopTaskQueueSet* _task_queues;
   514   // Overflow list of grey objects, threaded through mark-word
   515   // Manipulated with CAS in the parallel/multi-threaded case.
   516   oop _overflow_list;
   517   // The following array-pair keeps track of mark words
    518   // displaced for accommodating the overflow list above.
   519   // This code will likely be revisited under RFE#4922830.
   520   GrowableArray<oop>*     _preserved_oop_stack;
   521   GrowableArray<markOop>* _preserved_mark_stack;
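  // A hedged sketch of the "threaded through mark-word" idea (serial case,
  // illustrative only): the list link is stored in the word that normally
  // holds the object's mark, so "interesting" marks must be preserved on the
  // stacks above before being overwritten, and restored when the list drains.
  struct ExampleMarkWordList {
    struct Node { intptr_t mark_word; };       // stand-in for an object header
    static void push(Node*& head, Node* p) {
      // (a meaningful mark would be preserved first in the real collector)
      p->mark_word = (intptr_t)head;           // link stored in the mark word
      head = p;
    }
    static Node* pop(Node*& head) {
      Node* p = head;
      if (p != NULL) head = (Node*)p->mark_word;
      return p;
    }
  };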
   523   int*             _hash_seed;
   525   // In support of multi-threaded concurrent phases
   526   YieldingFlexibleWorkGang* _conc_workers;
   528   // Performance Counters
   529   CollectorCounters* _gc_counters;
   531   // Initialization Errors
   532   bool _completed_initialization;
   534   // In support of ExplicitGCInvokesConcurrent
   535   static   bool _full_gc_requested;
   536   unsigned int  _collection_count_start;
   538   // Should we unload classes this concurrent cycle?
   539   bool _should_unload_classes;
   540   unsigned int  _concurrent_cycles_since_last_unload;
   541   unsigned int concurrent_cycles_since_last_unload() const {
   542     return _concurrent_cycles_since_last_unload;
   543   }
   544   // Did we (allow) unload classes in the previous concurrent cycle?
   545   bool unloaded_classes_last_cycle() const {
   546     return concurrent_cycles_since_last_unload() == 0;
   547   }
   549   // Verification support
   550   CMSBitMap     _verification_mark_bm;
   551   void verify_after_remark_work_1();
   552   void verify_after_remark_work_2();
   554   // true if any verification flag is on.
   555   bool _verifying;
   556   bool verifying() const { return _verifying; }
   557   void set_verifying(bool v) { _verifying = v; }
   559   // Collector policy
   560   ConcurrentMarkSweepPolicy* _collector_policy;
   561   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
   563   // Check whether the gc time limit has been
   564   // exceeded and set the size policy flag
   565   // appropriately.
   566   void check_gc_time_limit();
   567   // XXX Move these to CMSStats ??? FIX ME !!!
   568   elapsedTimer _sweep_timer;
   569   AdaptivePaddedAverage _sweep_estimate;
   571  protected:
   572   ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
   573   ConcurrentMarkSweepGeneration* _permGen; // perm gen
   574   MemRegion                      _span;    // span covering above two
   575   CardTableRS*                   _ct;      // card table
   577   // CMS marking support structures
   578   CMSBitMap     _markBitMap;
   579   CMSBitMap     _modUnionTable;
   580   CMSMarkStack  _markStack;
   581   CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
   582                                           // to revisit
   583   CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
   585   HeapWord*     _restart_addr; // in support of marking stack overflow
   586   void          lower_restart_addr(HeapWord* low);
   588   // Counters in support of marking stack / work queue overflow handling:
   589   // a non-zero value indicates certain types of overflow events during
   590   // the current CMS cycle and could lead to stack resizing efforts at
   591   // an opportune future time.
   592   size_t        _ser_pmc_preclean_ovflw;
   593   size_t        _ser_pmc_remark_ovflw;
   594   size_t        _par_pmc_remark_ovflw;
   595   size_t        _ser_kac_preclean_ovflw;
   596   size_t        _ser_kac_ovflw;
   597   size_t        _par_kac_ovflw;
   598   NOT_PRODUCT(size_t _num_par_pushes;)
   600   // ("Weak") Reference processing support
   601   ReferenceProcessor*            _ref_processor;
   602   CMSIsAliveClosure              _is_alive_closure;
   603       // keep this textually after _markBitMap and _span; c'tor dependency
   605   ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
   606   ModUnionClosure    _modUnionClosure;
   607   ModUnionClosurePar _modUnionClosurePar;
   609   // CMS abstract state machine
   610   // initial_state: Idling
   611   // next_state(Idling)            = {Marking}
   612   // next_state(Marking)           = {Precleaning, Sweeping}
   613   // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
   614   // next_state(AbortablePreclean) = {FinalMarking}
   615   // next_state(FinalMarking)      = {Sweeping}
   616   // next_state(Sweeping)          = {Resizing}
   617   // next_state(Resizing)          = {Resetting}
   618   // next_state(Resetting)         = {Idling}
   619   // The numeric values below are chosen so that:
   620   // . _collectorState <= Idling ==  post-sweep && pre-mark
   621   // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
    622   //                                            precleaning || abortablePreclean
   623   enum CollectorState {
   624     Resizing            = 0,
   625     Resetting           = 1,
   626     Idling              = 2,
   627     InitialMarking      = 3,
   628     Marking             = 4,
   629     Precleaning         = 5,
   630     AbortablePreclean   = 6,
   631     FinalMarking        = 7,
   632     Sweeping            = 8
   633   };
   634   static CollectorState _collectorState;
   636   // State related to prologue/epilogue invocation for my generations
   637   bool _between_prologue_and_epilogue;
    639   // Signalling/State related to coordination between fore- and background GC
   640   // Note: When the baton has been passed from background GC to foreground GC,
   641   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
   642   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
   643                                  // wants to go active
   644   static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
   645                                  // yet passed the baton to the foreground GC
   647   // Support for CMSScheduleRemark (abortable preclean)
   648   bool _abort_preclean;
   649   bool _start_sampling;
   651   int    _numYields;
   652   size_t _numDirtyCards;
   653   uint   _sweepCount;
   654   // number of full gc's since the last concurrent gc.
   655   uint   _full_gcs_since_conc_gc;
   657   // occupancy used for bootstrapping stats
   658   double _bootstrap_occupancy;
   660   // timer
   661   elapsedTimer _timer;
   663   // Timing, allocation and promotion statistics, used for scheduling.
   664   CMSStats      _stats;
   666   // Allocation limits installed in the young gen, used only in
   667   // CMSIncrementalMode.  When an allocation in the young gen would cross one of
   668   // these limits, the cms generation is notified and the cms thread is started
   669   // or stopped, respectively.
   670   HeapWord*     _icms_start_limit;
   671   HeapWord*     _icms_stop_limit;
   673   enum CMS_op_type {
   674     CMS_op_checkpointRootsInitial,
   675     CMS_op_checkpointRootsFinal
   676   };
   678   void do_CMS_operation(CMS_op_type op);
   679   bool stop_world_and_do(CMS_op_type op);
   681   OopTaskQueueSet* task_queues() { return _task_queues; }
   682   int*             hash_seed(int i) { return &_hash_seed[i]; }
   683   YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
   685   // Support for parallelizing Eden rescan in CMS remark phase
   686   void sample_eden(); // ... sample Eden space top
   688  private:
   689   // Support for parallelizing young gen rescan in CMS remark phase
   690   Generation* _young_gen;  // the younger gen
   691   HeapWord** _top_addr;    // ... Top of Eden
   692   HeapWord** _end_addr;    // ... End of Eden
   693   HeapWord** _eden_chunk_array; // ... Eden partitioning array
   694   size_t     _eden_chunk_index; // ... top (exclusive) of array
   695   size_t     _eden_chunk_capacity;  // ... max entries in array
   697   // Support for parallelizing survivor space rescan
   698   HeapWord** _survivor_chunk_array;
   699   size_t     _survivor_chunk_index;
   700   size_t     _survivor_chunk_capacity;
   701   size_t*    _cursor;
   702   ChunkArray* _survivor_plab_array;
   704   // Support for marking stack overflow handling
   705   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
   706   bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
   707   void push_on_overflow_list(oop p);
   708   void par_push_on_overflow_list(oop p);
   709   // the following is, obviously, not, in general, "MT-stable"
   710   bool overflow_list_is_empty() const;
   712   void preserve_mark_if_necessary(oop p);
   713   void par_preserve_mark_if_necessary(oop p);
   714   void preserve_mark_work(oop p, markOop m);
   715   void restore_preserved_marks_if_any();
   716   NOT_PRODUCT(bool no_preserved_marks() const;)
   717   // in support of testing overflow code
   718   NOT_PRODUCT(int _overflow_counter;)
   719   NOT_PRODUCT(bool simulate_overflow();)       // sequential
   720   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
   722   int _roots_scanning_options;
   723   int roots_scanning_options() const      { return _roots_scanning_options; }
   724   void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
   725   void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }
   727   // CMS work methods
   728   void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
   730   // a return value of false indicates failure due to stack overflow
   731   bool markFromRootsWork(bool asynch);  // concurrent marking work
   733  public:   // FIX ME!!! only for testing
   734   bool do_marking_st(bool asynch);      // single-threaded marking
   735   bool do_marking_mt(bool asynch);      // multi-threaded  marking
   737  private:
   739   // concurrent precleaning work
   740   size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
   741                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
   742   size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
   743                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   744   // Does precleaning work, returning a quantity indicative of
   745   // the amount of "useful work" done.
   746   size_t preclean_work(bool clean_refs, bool clean_survivors);
   747   void abortable_preclean(); // Preclean while looking for possible abort
   748   void initialize_sequential_subtasks_for_young_gen_rescan(int i);
   749   // Helper function for above; merge-sorts the per-thread plab samples
   750   void merge_survivor_plab_arrays(ContiguousSpace* surv);
   751   // Resets (i.e. clears) the per-thread plab sample vectors
   752   void reset_survivor_plab_arrays();
   754   // final (second) checkpoint work
   755   void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
   756                                 bool init_mark_was_synchronous);
   757   // work routine for parallel version of remark
   758   void do_remark_parallel();
   759   // work routine for non-parallel version of remark
   760   void do_remark_non_parallel();
   761   // reference processing work routine (during second checkpoint)
   762   void refProcessingWork(bool asynch, bool clear_all_soft_refs);
   764   // concurrent sweeping work
   765   void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
   767   // (concurrent) resetting of support data structures
   768   void reset(bool asynch);
   770   // Clear _expansion_cause fields of constituent generations
   771   void clear_expansion_cause();
    773   // An auxiliary method used to record the ends of
   774   // used regions of each generation to limit the extent of sweep
   775   void save_sweep_limits();
   777   // Resize the generations included in the collector.
   778   void compute_new_size();
   780   // A work method used by foreground collection to determine
   781   // what type of collection (compacting or not, continuing or fresh)
   782   // it should do.
   783   void decide_foreground_collection_type(bool clear_all_soft_refs,
   784     bool* should_compact, bool* should_start_over);
   786   // A work method used by the foreground collector to do
   787   // a mark-sweep-compact.
   788   void do_compaction_work(bool clear_all_soft_refs);
   790   // A work method used by the foreground collector to do
   791   // a mark-sweep, after taking over from a possibly on-going
   792   // concurrent mark-sweep collection.
   793   void do_mark_sweep_work(bool clear_all_soft_refs,
   794     CollectorState first_state, bool should_start_over);
    796   // If the background GC is active, acquire control from the background
   797   // GC and do the collection.
   798   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
   800   // For synchronizing passing of control from background to foreground
   801   // GC.  waitForForegroundGC() is called by the background
    802   // collector.  If it had to wait for a foreground collection,
   803   // it returns true and the background collection should assume
   804   // that the collection was finished by the foreground
   805   // collector.
   806   bool waitForForegroundGC();
   808   // Incremental mode triggering:  recompute the icms duty cycle and set the
   809   // allocation limits in the young gen.
   810   void icms_update_allocation_limits();
   812   size_t block_size_using_printezis_bits(HeapWord* addr) const;
   813   size_t block_size_if_printezis_bits(HeapWord* addr) const;
   814   HeapWord* next_card_start_after_block(HeapWord* addr) const;
   816   void setup_cms_unloading_and_verification_state();
   817  public:
   818   CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   819                ConcurrentMarkSweepGeneration* permGen,
   820                CardTableRS*                   ct,
   821                ConcurrentMarkSweepPolicy*     cp);
   822   ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
   824   ReferenceProcessor* ref_processor() { return _ref_processor; }
   825   void ref_processor_init();
   827   Mutex* bitMapLock()        const { return _markBitMap.lock();    }
   828   static CollectorState abstract_state() { return _collectorState;  }
   830   bool should_abort_preclean() const; // Whether preclean should be aborted.
   831   size_t get_eden_used() const;
   832   size_t get_eden_capacity() const;
   834   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
   836   // locking checks
   837   NOT_PRODUCT(static bool have_cms_token();)
   839   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
   840   bool shouldConcurrentCollect();
   842   void collect(bool   full,
   843                bool   clear_all_soft_refs,
   844                size_t size,
   845                bool   tlab);
   846   void collect_in_background(bool clear_all_soft_refs);
   847   void collect_in_foreground(bool clear_all_soft_refs);
   849   // In support of ExplicitGCInvokesConcurrent
   850   static void request_full_gc(unsigned int full_gc_count);
   851   // Should we unload classes in a particular concurrent cycle?
   852   bool should_unload_classes() const {
   853     return _should_unload_classes;
   854   }
   855   bool update_should_unload_classes();
   857   void direct_allocated(HeapWord* start, size_t size);
   859   // Object is dead if not marked and current phase is sweeping.
   860   bool is_dead_obj(oop obj) const;
   862   // After a promotion (of "start"), do any necessary marking.
   863   // If "par", then it's being done by a parallel GC thread.
   864   // The last two args indicate if we need precise marking
   865   // and if so the size of the object so it can be dirtied
   866   // in its entirety.
   867   void promoted(bool par, HeapWord* start,
   868                 bool is_obj_array, size_t obj_size);
   870   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
   871                                      size_t word_size);
   873   void getFreelistLocks() const;
   874   void releaseFreelistLocks() const;
   875   bool haveFreelistLocks() const;
   877   // GC prologue and epilogue
   878   void gc_prologue(bool full);
   879   void gc_epilogue(bool full);
   881   jlong time_of_last_gc(jlong now) {
   882     if (_collectorState <= Idling) {
   883       // gc not in progress
   884       return _time_of_last_gc;
   885     } else {
   886       // collection in progress
   887       return now;
   888     }
   889   }
   891   // Support for parallel remark of survivor space
   892   void* get_data_recorder(int thr_num);
   894   CMSBitMap* markBitMap()  { return &_markBitMap; }
   895   void directAllocated(HeapWord* start, size_t size);
   897   // main CMS steps and related support
   898   void checkpointRootsInitial(bool asynch);
   899   bool markFromRoots(bool asynch);  // a return value of false indicates failure
   900                                     // due to stack overflow
   901   void preclean();
   902   void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
   903                             bool init_mark_was_synchronous);
   904   void sweep(bool asynch);
   906   // Check that the currently executing thread is the expected
   907   // one (foreground collector or background collector).
   908   void check_correct_thread_executing()        PRODUCT_RETURN;
   909   // XXXPERM void print_statistics()           PRODUCT_RETURN;
   911   bool is_cms_reachable(HeapWord* addr);
   913   // Performance Counter Support
   914   CollectorCounters* counters()    { return _gc_counters; }
   916   // timer stuff
   917   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   918   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   919   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
   920   double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
   922   int  yields()          { return _numYields; }
   923   void resetYields()     { _numYields = 0;    }
   924   void incrementYields() { _numYields++;      }
   925   void resetNumDirtyCards()               { _numDirtyCards = 0; }
   926   void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
   927   size_t  numDirtyCards()                 { return _numDirtyCards; }
   929   static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
   930   static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
   931   static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
   932   static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
   933   uint  sweepCount() const             { return _sweepCount; }
   934   void incrementSweepCount()           { _sweepCount++; }
   936   // Timers/stats for gc scheduling and incremental mode pacing.
   937   CMSStats& stats() { return _stats; }
   939   // Convenience methods that check whether CMSIncrementalMode is enabled and
   940   // forward to the corresponding methods in ConcurrentMarkSweepThread.
   941   static void start_icms();
   942   static void stop_icms();    // Called at the end of the cms cycle.
   943   static void disable_icms(); // Called before a foreground collection.
   944   static void enable_icms();  // Called after a foreground collection.
   945   void icms_wait();          // Called at yield points.
   947   // Adaptive size policy
   948   CMSAdaptiveSizePolicy* size_policy();
   949   CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
   951   // debugging
   952   void verify(bool);
   953   bool verify_after_remark();
   954   void verify_ok_to_terminate() const PRODUCT_RETURN;
   955   void verify_work_stacks_empty() const PRODUCT_RETURN;
   956   void verify_overflow_empty() const PRODUCT_RETURN;
   958   // convenience methods in support of debugging
   959   static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
   960   HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
   962   // accessors
   963   CMSMarkStack* verification_mark_stack() { return &_markStack; }
   964   CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
   966   // Get the bit map with a perm gen "deadness" information.
   967   CMSBitMap* perm_gen_verify_bit_map()       { return &_perm_gen_verify_bit_map; }
   969   // Initialization errors
   970   bool completed_initialization() { return _completed_initialization; }
   971 };
   973 class CMSExpansionCause : public AllStatic  {
   974  public:
   975   enum Cause {
   976     _no_expansion,
   977     _satisfy_free_ratio,
   978     _satisfy_promotion,
   979     _satisfy_allocation,
   980     _allocate_par_lab,
   981     _allocate_par_spooling_space,
   982     _adaptive_size_policy
   983   };
   984   // Return a string describing the cause of the expansion.
   985   static const char* to_string(CMSExpansionCause::Cause cause);
   986 };
   988 class ConcurrentMarkSweepGeneration: public CardGeneration {
   989   friend class VMStructs;
   990   friend class ConcurrentMarkSweepThread;
   991   friend class ConcurrentMarkSweep;
   992   friend class CMSCollector;
   993  protected:
   994   static CMSCollector*       _collector; // the collector that collects us
   995   CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)
   997   // Performance Counters
   998   GenerationCounters*      _gen_counters;
   999   GSpaceCounters*          _space_counters;
  1001   // Words directly allocated, used by CMSStats.
  1002   size_t _direct_allocated_words;
  1004   // Non-product stat counters
  1005   NOT_PRODUCT(
  1006     int _numObjectsPromoted;
  1007     int _numWordsPromoted;
  1008     int _numObjectsAllocated;
  1009     int _numWordsAllocated;
  1010   )
  1012   // Used for sizing decisions
  1013   bool _incremental_collection_failed;
  1014   bool incremental_collection_failed() {
  1015     return _incremental_collection_failed;
  1016   }
  1017   void set_incremental_collection_failed() {
  1018     _incremental_collection_failed = true;
  1019   }
  1020   void clear_incremental_collection_failed() {
  1021     _incremental_collection_failed = false;
  1022   }
  1024   // accessors
  1025   void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  1026   CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
  1028  private:
  1029   // For parallel young-gen GC support.
  1030   CMSParGCThreadState** _par_gc_thread_states;
  1032   // Reason generation was expanded
  1033   CMSExpansionCause::Cause _expansion_cause;
  1035   // In support of MinChunkSize being larger than min object size
  1036   const double _dilatation_factor;
  1038   enum CollectionTypes {
  1039     Concurrent_collection_type          = 0,
  1040     MS_foreground_collection_type       = 1,
  1041     MSC_foreground_collection_type      = 2,
  1042     Unknown_collection_type             = 3
  1043   };
  1045   CollectionTypes _debug_collection_type;
  1047   // Fraction of current occupancy at which to start a CMS collection which
  1048   // will collect this generation (at least).
  1049   double _initiating_occupancy;
  1051  protected:
  1052   // Shrink generation by specified size (returns false if unable to shrink)
  1053   virtual void shrink_by(size_t bytes);
  1055   // Update statistics for GC
  1056   virtual void update_gc_stats(int level, bool full);
  1058   // Maximum available space in the generation (including uncommitted)
  1059   // space.
  1060   size_t max_available() const;
  1062   // getter and initializer for _initiating_occupancy field.
  1063   double initiating_occupancy() const { return _initiating_occupancy; }
  1064   void   init_initiating_occupancy(intx io, intx tr);
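  // A hedged sketch of how the initiating occupancy might be derived from the
  // two flag values (illustrative only; the MinHeapFreeRatio-based fallback is
  // an assumption about the default path): an explicit percentage "io" wins,
  // otherwise a default is derived from the minimum free ratio and the
  // trigger ratio "tr".
  static double example_initiating_occupancy(intx io, intx tr,
                                             uintx min_heap_free_ratio) {
    if (io >= 0) {
      return (double)io / 100.0;                       // explicit percentage
    }
    return ((100.0 - min_heap_free_ratio) +
            (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
  }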
  1066  public:
  1067   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
  1068                                 int level, CardTableRS* ct,
  1069                                 bool use_adaptive_freelists,
  1070                                 FreeBlockDictionary::DictionaryChoice);
  1072   // Accessors
  1073   CMSCollector* collector() const { return _collector; }
  1074   static void set_collector(CMSCollector* collector) {
  1075     assert(_collector == NULL, "already set");
  1076     _collector = collector;
  1077   }
  1078   CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
  1080   Mutex* freelistLock() const;
  1082   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
  1084   // Adaptive size policy
  1085   CMSAdaptiveSizePolicy* size_policy();
  1087   bool refs_discovery_is_atomic() const { return false; }
  1088   bool refs_discovery_is_mt()     const {
  1089     // Note: CMS does MT-discovery during the parallel-remark
  1090     // phases. Use ReferenceProcessorMTMutator to make refs
  1091     // discovery MT-safe during such phases or other parallel
  1092     // discovery phases in the future. This may all go away
  1093     // if/when we decide that refs discovery is sufficiently
  1094     // rare that the cost of the CAS's involved is in the
  1095     // noise. That's a measurement that should be done, and
  1096     // the code simplified if that turns out to be the case.
  1097     return false;
  1098   }
  1100   // Override
  1101   virtual void ref_processor_init();
  1103   // Grow generation by specified size (returns false if unable to grow)
  1104   bool grow_by(size_t bytes);
  1105   // Grow generation to reserved size.
  1106   bool grow_to_reserved();
  1108   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
  1110   // Space enquiries
  1111   size_t capacity() const;
  1112   size_t used() const;
  1113   size_t free() const;
  1114   double occupancy() const { return ((double)used())/((double)capacity()); }
  1115   size_t contiguous_available() const;
  1116   size_t unsafe_max_alloc_nogc() const;
  1118   // over-rides
  1119   MemRegion used_region() const;
  1120   MemRegion used_region_at_save_marks() const;
  1122   // Does a "full" (forced) collection invoked on this generation collect
  1123   // all younger generations as well? Note that the second conjunct is a
  1124   // hack to allow the collection of the younger gen first if the flag is
  1125   // set. This is better than using the policy's should_collect_gen0_first()
  1126   // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  1127   virtual bool full_collects_younger_generations() const {
  1128     return UseCMSCompactAtFullCollection && !CollectGen0First;
  1129   }
  1131   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  1133   // Support for compaction
  1134   CompactibleSpace* first_compaction_space() const;
  1135   // Adjust quantites in the generation affected by
  1136   // the compaction.
  1137   void reset_after_compaction();
  1139   // Allocation support
  1140   HeapWord* allocate(size_t size, bool tlab);
  1141   HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  1142   oop       promote(oop obj, size_t obj_size);
  1143   HeapWord* par_allocate(size_t size, bool tlab) {
  1144     return allocate(size, tlab);
  1145   }
  1147   // Incremental mode triggering.
  1148   HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
  1149                                      size_t word_size);
  1151   // Used by CMSStats to track direct allocation.  The value is sampled and
  1152   // reset after each young gen collection.
  1153   size_t direct_allocated_words() const { return _direct_allocated_words; }
  1154   void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
  1156   // Overrides for parallel promotion.
  1157   virtual oop par_promote(int thread_num,
  1158                           oop obj, markOop m, size_t word_sz);
  1159   // This one should not be called for CMS.
  1160   virtual void par_promote_alloc_undo(int thread_num,
  1161                                       HeapWord* obj, size_t word_sz);
  1162   virtual void par_promote_alloc_done(int thread_num);
  1163   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
  1165   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
  1166     bool younger_handles_promotion_failure) const;
  1168   bool should_collect(bool full, size_t size, bool tlab);
  1169   virtual bool should_concurrent_collect() const;
  1170   virtual bool is_too_full() const;
  1171   void collect(bool   full,
  1172                bool   clear_all_soft_refs,
  1173                size_t size,
  1174                bool   tlab);
  1176   HeapWord* expand_and_allocate(size_t word_size,
  1177                                 bool tlab,
  1178                                 bool parallel = false);
  1180   // GC prologue and epilogue
  1181   void gc_prologue(bool full);
  1182   void gc_prologue_work(bool full, bool registerClosure,
  1183                         ModUnionClosure* modUnionClosure);
  1184   void gc_epilogue(bool full);
  1185   void gc_epilogue_work(bool full);
  1187   // Time since last GC of this generation
  1188   jlong time_of_last_gc(jlong now) {
  1189     return collector()->time_of_last_gc(now);
  1190   }
  1191   void update_time_of_last_gc(jlong now) {
  1192     collector()->update_time_of_last_gc(now);
  1193   }
  1195   // Allocation failure
  1196   void expand(size_t bytes, size_t expand_bytes,
  1197     CMSExpansionCause::Cause cause);
  1198   virtual bool expand(size_t bytes, size_t expand_bytes);
  1199   void shrink(size_t bytes);
  1200   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  1201   bool expand_and_ensure_spooling_space(PromotionInfo* promo);
  1203   // Iteration support and related enquiries
  1204   void save_marks();
  1205   bool no_allocs_since_save_marks();
  1206   void object_iterate_since_last_GC(ObjectClosure* cl);
  1207   void younger_refs_iterate(OopsInGenClosure* cl);
  1209   // Iteration support specific to CMS generations
  1210   void save_sweep_limit();
  1212   // More iteration support
  1213   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  1214   virtual void oop_iterate(OopClosure* cl);
  1215   virtual void object_iterate(ObjectClosure* cl);
  1217   // Need to declare the full complement of closures, whether we'll
  1218   // override them or not, or get message from the compiler:
  1219   //   oop_since_save_marks_iterate_nv hides virtual function...
  1220   #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
  1221     void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  1222   ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
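  // For a concrete closure type, the macro above expands to a declaration
  // such as (illustrative example of one of the enumerated closure types):
  //   void oop_since_save_marks_iterate_nv(ScanClosure* cl);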
  1224   // Smart allocation  XXX -- move to CFLSpace?
  1225   void setNearLargestChunk();
  1226   bool isNearLargestChunk(HeapWord* addr);
  1228   // Get the chunk at the end of the space.  Delegates to
  1229   // the space.
  1230   FreeChunk* find_chunk_at_end();
  1232   // Overriding of unused functionality (sharing not yet supported with CMS)
  1233   void pre_adjust_pointers();
  1234   void post_compact();
  1236   // Debugging
  1237   void prepare_for_verify();
  1238   void verify(bool allow_dirty);
  1239   void print_statistics()               PRODUCT_RETURN;
  1241   // Performance Counters support
  1242   virtual void update_counters();
  1243   virtual void update_counters(size_t used);
  1244   void initialize_performance_counters();
  1245   CollectorCounters* counters()  { return collector()->counters(); }
  1247   // Support for parallel remark of survivor space
  1248   void* get_data_recorder(int thr_num) {
  1249     // Delegate to the collector.
  1250     return collector()->get_data_recorder(thr_num);
  1251   }
  1253   // Printing
  1254   const char* name() const;
  1255   virtual const char* short_name() const { return "CMS"; }
  1256   void        print() const;
  1257   void printOccupancy(const char* s);
  1258   bool must_be_youngest() const { return false; }
  1259   bool must_be_oldest()   const { return true; }
  1261   void compute_new_size();
  1263   CollectionTypes debug_collection_type() { return _debug_collection_type; }
  1264   void rotate_debug_collection_type();
  1265 };
  1267 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
  1269   // Return the size policy from the heap's collector
  1270   // policy casted to CMSAdaptiveSizePolicy*.
  1271   CMSAdaptiveSizePolicy* cms_size_policy() const;
  1273   // Resize the generation based on the adaptive size
  1274   // policy.
  1275   void resize(size_t cur_promo, size_t desired_promo);
  1277   // Return the GC counters from the collector policy
  1278   CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
  1280   virtual void shrink_by(size_t bytes);
  1282  public:
  1283   virtual void compute_new_size();
  1284   ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
  1285                                   int level, CardTableRS* ct,
  1286                                   bool use_adaptive_freelists,
  1287                                   FreeBlockDictionary::DictionaryChoice
  1288                                     dictionaryChoice) :
  1289     ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
  1290       use_adaptive_freelists, dictionaryChoice) {}
  1292   virtual const char* short_name() const { return "ASCMS"; }
  1293   virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
  1295   virtual void update_counters();
  1296   virtual void update_counters(size_t used);
  1297 };
  1299 //
  1300 // Closures of various sorts used by CMS to accomplish its work
  1301 //
  1303 // This closure is used to check that a certain set of oops is empty.
  1304 class FalseClosure: public OopClosure {
  1305  public:
  1306   void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  1307   void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
  1308 };
  1310 // This closure is used to do concurrent marking from the roots
  1311 // following the first checkpoint.
  1312 class MarkFromRootsClosure: public BitMapClosure {
  1313   CMSCollector*  _collector;
  1314   MemRegion      _span;
  1315   CMSBitMap*     _bitMap;
  1316   CMSBitMap*     _mut;
  1317   CMSMarkStack*  _markStack;
  1318   CMSMarkStack*  _revisitStack;
  1319   bool           _yield;
  1320   int            _skipBits;
  1321   HeapWord*      _finger;
  1322   HeapWord*      _threshold;
  1323   DEBUG_ONLY(bool _verifying;)
  1325  public:
  1326   MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
  1327                        CMSBitMap* bitMap,
  1328                        CMSMarkStack*  markStack,
  1329                        CMSMarkStack*  revisitStack,
  1330                        bool should_yield, bool verifying = false);
  1331   bool do_bit(size_t offset);
  1332   void reset(HeapWord* addr);
  1333   inline void do_yield_check();
  1335  private:
  1336   void scanOopsInOop(HeapWord* ptr);
  1337   void do_yield_work();
  1338 };
  1340 // This closure is used to do concurrent multi-threaded
  1341 // marking from the roots following the first checkpoint.
  1342 // XXX This should really be a subclass of the serial version
  1343 // above, but I have not had the time to refactor things cleanly.
  1344 // That will be done for Dolphin.
  1345 class Par_MarkFromRootsClosure: public BitMapClosure {
  1346   CMSCollector*  _collector;
  1347   MemRegion      _whole_span;
  1348   MemRegion      _span;
  1349   CMSBitMap*     _bit_map;
  1350   CMSBitMap*     _mut;
  1351   OopTaskQueue*  _work_queue;
  1352   CMSMarkStack*  _overflow_stack;
  1353   CMSMarkStack*  _revisit_stack;
  1354   bool           _yield;
  1355   int            _skip_bits;
  1356   HeapWord*      _finger;
  1357   HeapWord*      _threshold;
  1358   CMSConcMarkingTask* _task;
  1359  public:
  1360   Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
  1361                        MemRegion span,
  1362                        CMSBitMap* bit_map,
  1363                        OopTaskQueue* work_queue,
  1364                        CMSMarkStack*  overflow_stack,
  1365                        CMSMarkStack*  revisit_stack,
  1366                        bool should_yield);
  1367   bool do_bit(size_t offset);
  1368   inline void do_yield_check();
  1370  private:
  1371   void scan_oops_in_oop(HeapWord* ptr);
  1372   void do_yield_work();
  1373   bool get_work_from_overflow_stack();
  1374 };
  1376 // The following closures are used to do certain kinds of verification of
  1377 // CMS marking.
  1378 class PushAndMarkVerifyClosure: public OopClosure {
  1379   CMSCollector*    _collector;
  1380   MemRegion        _span;
  1381   CMSBitMap*       _verification_bm;
  1382   CMSBitMap*       _cms_bm;
  1383   CMSMarkStack*    _mark_stack;
  1384  protected:
  1385   void do_oop(oop p);
  1386   template <class T> inline void do_oop_work(T *p) {
  1387     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
  1388     do_oop(obj);
  1389   }
  1390  public:
  1391   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
  1392                            MemRegion span,
  1393                            CMSBitMap* verification_bm,
  1394                            CMSBitMap* cms_bm,
  1395                            CMSMarkStack*  mark_stack);
  1396   void do_oop(oop* p);
  1397   void do_oop(narrowOop* p);
  1398   // Deal with a stack overflow condition
  1399   void handle_stack_overflow(HeapWord* lost);
  1400 };
  1402 class MarkFromRootsVerifyClosure: public BitMapClosure {
  1403   CMSCollector*  _collector;
  1404   MemRegion      _span;
  1405   CMSBitMap*     _verification_bm;
  1406   CMSBitMap*     _cms_bm;
  1407   CMSMarkStack*  _mark_stack;
  1408   HeapWord*      _finger;
  1409   PushAndMarkVerifyClosure _pam_verify_closure;
  1410  public:
  1411   MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
  1412                              CMSBitMap* verification_bm,
  1413                              CMSBitMap* cms_bm,
  1414                              CMSMarkStack*  mark_stack);
  1415   bool do_bit(size_t offset);
  1416   void reset(HeapWord* addr);
  1417 };
  1420 // This closure is used to check that a certain set of bits is
  1421 // "empty" (i.e. the bit vector doesn't have any 1-bits).
  1422 class FalseBitMapClosure: public BitMapClosure {
  1423  public:
  1424   bool do_bit(size_t offset) {
  1425     guarantee(false, "Should not have a 1 bit");
  1426     return true;
  1427   }
  1428 };
  1430 // This closure is used during the second checkpointing phase
  1431 // to rescan the marked objects on the dirty cards in the mod
  1432 // union table and the card table proper. It's invoked via
  1433 // MarkFromDirtyCardsClosure below. It uses either
  1434 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
  1435 // declared in genOopClosures.hpp to accomplish some of its work.
  1436 // In the parallel case the bitMap is shared, so access to
  1437 // it needs to be suitably synchronized for updates by embedded
  1438 // closures that update it; however, this closure itself only
  1439 // reads the bit_map and, because it is idempotent, is immune to
  1440 // reading stale values.
  1441 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  1442   #ifdef ASSERT
  1443     CMSCollector*          _collector;
  1444     MemRegion              _span;
  1445     union {
  1446       CMSMarkStack*        _mark_stack;
  1447       OopTaskQueue*        _work_queue;
  1448     };
  1449   #endif // ASSERT
  1450   bool                       _parallel;
  1451   CMSBitMap*                 _bit_map;
  1452   union {
  1453     MarkRefsIntoAndScanClosure*     _scan_closure;
  1454     Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  1455   };
  1457  public:
  1458   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
  1459                                 MemRegion span,
  1460                                 ReferenceProcessor* rp,
  1461                                 CMSBitMap* bit_map,
  1462                                 CMSMarkStack*  mark_stack,
  1463                                 CMSMarkStack*  revisit_stack,
  1464                                 MarkRefsIntoAndScanClosure* cl):
  1465     #ifdef ASSERT
  1466       _collector(collector),
  1467       _span(span),
  1468       _mark_stack(mark_stack),
  1469     #endif // ASSERT
  1470     _parallel(false),
  1471     _bit_map(bit_map),
  1472     _scan_closure(cl) { }
  1474   ScanMarkedObjectsAgainClosure(CMSCollector* collector,
  1475                                 MemRegion span,
  1476                                 ReferenceProcessor* rp,
  1477                                 CMSBitMap* bit_map,
  1478                                 OopTaskQueue* work_queue,
  1479                                 CMSMarkStack* revisit_stack,
  1480                                 Par_MarkRefsIntoAndScanClosure* cl):
  1481     #ifdef ASSERT
  1482       _collector(collector),
  1483       _span(span),
  1484       _work_queue(work_queue),
  1485     #endif // ASSERT
  1486     _parallel(true),
  1487     _bit_map(bit_map),
  1488     _par_scan_closure(cl) { }
  1490   void do_object(oop obj) {
  1491     guarantee(false, "Call do_object_b(oop, MemRegion) instead");
  1493   bool do_object_b(oop obj) {
  1494     guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
  1495     return false;
  1496   }
  1497   bool do_object_bm(oop p, MemRegion mr);
  1498 };
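// A minimal illustrative sketch of choosing the serial flavour above; all
// function and variable names here are placeholders, not code from this file.
// Work is driven through do_object_bm(obj, region) rather than through
// do_object()/do_object_b(), which deliberately guarantee(false). The parallel
// flavour is identical except that it takes an OopTaskQueue* and a
// Par_MarkRefsIntoAndScanClosure*.
#if 0  // illustrative sketch only
static void rescan_one_object_serial(CMSCollector* collector, MemRegion span,
                                     ReferenceProcessor* rp, CMSBitMap* bit_map,
                                     CMSMarkStack* mark_stack,
                                     CMSMarkStack* revisit_stack,
                                     MarkRefsIntoAndScanClosure* scan_cl,
                                     oop obj, MemRegion dirty_region) {
  ScanMarkedObjectsAgainClosure cl(collector, span, rp, bit_map,
                                   mark_stack, revisit_stack, scan_cl);
  cl.do_object_bm(obj, dirty_region);       // rescan obj within this region
}
#endif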
  1500 // This closure is used during the second checkpointing phase
  1501 // to rescan the marked objects on the dirty cards in the mod
  1502 // union table and the card table proper. It invokes
  1503 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
  1504 // In the parallel case, the bit map is shared and requires
  1505 // synchronized access.
  1506 class MarkFromDirtyCardsClosure: public MemRegionClosure {
  1507   CompactibleFreeListSpace*      _space;
  1508   ScanMarkedObjectsAgainClosure  _scan_cl;
  1509   size_t                         _num_dirty_cards;
  1511  public:
  1512   MarkFromDirtyCardsClosure(CMSCollector* collector,
  1513                             MemRegion span,
  1514                             CompactibleFreeListSpace* space,
  1515                             CMSBitMap* bit_map,
  1516                             CMSMarkStack* mark_stack,
  1517                             CMSMarkStack* revisit_stack,
  1518                             MarkRefsIntoAndScanClosure* cl):
  1519     _space(space),
  1520     _num_dirty_cards(0),
  1521     _scan_cl(collector, span, collector->ref_processor(), bit_map,
  1522                  mark_stack, revisit_stack, cl) { }
  1524   MarkFromDirtyCardsClosure(CMSCollector* collector,
  1525                             MemRegion span,
  1526                             CompactibleFreeListSpace* space,
  1527                             CMSBitMap* bit_map,
  1528                             OopTaskQueue* work_queue,
  1529                             CMSMarkStack* revisit_stack,
  1530                             Par_MarkRefsIntoAndScanClosure* cl):
  1531     _space(space),
  1532     _num_dirty_cards(0),
  1533     _scan_cl(collector, span, collector->ref_processor(), bit_map,
  1534              work_queue, revisit_stack, cl) { }
  1536   void do_MemRegion(MemRegion mr);
  1537   void set_space(CompactibleFreeListSpace* space) { _space = space; }
  1538   size_t num_dirty_cards() { return _num_dirty_cards; }
  1539 };
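// A minimal illustrative sketch of applying the closure above to a batch of
// dirty-card regions; dirty_regions/num_regions are placeholders for whatever
// the caller derived from the mod union table or the card table.
#if 0  // illustrative sketch only
static size_t rescan_dirty_regions(MarkFromDirtyCardsClosure* dirty_cl,
                                   MemRegion* dirty_regions,
                                   size_t num_regions) {
  for (size_t i = 0; i < num_regions; i++) {
    dirty_cl->do_MemRegion(dirty_regions[i]);  // rescan marked objects on these cards
  }
  return dirty_cl->num_dirty_cards();          // statistics for this pass
}
#endif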
  1541 // This closure is used in the non-product build to check
  1542 // that there are no MemRegions with a certain property.
  1543 class FalseMemRegionClosure: public MemRegionClosure {
  1544   void do_MemRegion(MemRegion mr) {
  1545     guarantee(!mr.is_empty(), "Shouldn't be empty");
  1546     guarantee(false, "Should never be here");
  1548 };
  1550 // This closure is used during the precleaning phase
  1551 // to "carefully" rescan marked objects on dirty cards.
  1552 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
  1553 // to accomplish some of its work.
  1554 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  1555   CMSCollector*                  _collector;
  1556   MemRegion                      _span;
  1557   bool                           _yield;
  1558   Mutex*                         _freelistLock;
  1559   CMSBitMap*                     _bitMap;
  1560   CMSMarkStack*                  _markStack;
  1561   MarkRefsIntoAndScanClosure*    _scanningClosure;
  1563  public:
  1564   ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
  1565                                          MemRegion     span,
  1566                                          CMSBitMap* bitMap,
  1567                                          CMSMarkStack*  markStack,
  1568                                          CMSMarkStack*  revisitStack,
  1569                                          MarkRefsIntoAndScanClosure* cl,
  1570                                          bool should_yield):
  1571     _collector(collector),
  1572     _span(span),
  1573     _yield(should_yield),
  1574     _bitMap(bitMap),
  1575     _markStack(markStack),
  1576     _scanningClosure(cl) {
  1577   }
  1579   void do_object(oop p) {
  1580     guarantee(false, "call do_object_careful instead");
  1583   size_t      do_object_careful(oop p) {
  1584     guarantee(false, "Unexpected caller");
  1585     return 0;
  1586   }
  1588   size_t      do_object_careful_m(oop p, MemRegion mr);
  1590   void setFreelistLock(Mutex* m) {
  1591     _freelistLock = m;
  1592     _scanningClosure->set_freelistLock(m);
  1593   }
  1595  private:
  1596   inline bool do_yield_check();
  1598   void do_yield_work();
  1599 };
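// A minimal illustrative sketch of the expected calling convention: install the
// space's free list lock first (presumably so it can be dropped and retaken
// around yields), then present each marked object together with the dirty
// region being rescanned. All argument names are placeholders.
#if 0  // illustrative sketch only
static size_t preclean_one_object(ScanMarkedObjectsAgainCarefullyClosure* cl,
                                  Mutex* freelist_lock,
                                  oop obj, MemRegion dirty_region) {
  cl->setFreelistLock(freelist_lock);
  return cl->do_object_careful_m(obj, dirty_region);
}
#endif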
  1601 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  1602   CMSCollector*                  _collector;
  1603   MemRegion                      _span;
  1604   bool                           _yield;
  1605   CMSBitMap*                     _bit_map;
  1606   CMSMarkStack*                  _mark_stack;
  1607   PushAndMarkClosure*            _scanning_closure;
  1608   unsigned int                   _before_count;
  1610  public:
  1611   SurvivorSpacePrecleanClosure(CMSCollector* collector,
  1612                                MemRegion     span,
  1613                                CMSBitMap*    bit_map,
  1614                                CMSMarkStack* mark_stack,
  1615                                PushAndMarkClosure* cl,
  1616                                unsigned int  before_count,
  1617                                bool          should_yield):
  1618     _collector(collector),
  1619     _span(span),
  1620     _yield(should_yield),
  1621     _bit_map(bit_map),
  1622     _mark_stack(mark_stack),
  1623     _scanning_closure(cl),
  1624     _before_count(before_count)
  1625   { }
  1627   void do_object(oop p) {
  1628     guarantee(false, "call do_object_careful instead");
  1631   size_t      do_object_careful(oop p);
  1633   size_t      do_object_careful_m(oop p, MemRegion mr) {
  1634     guarantee(false, "Unexpected caller");
  1635     return 0;
  1636   }
  1638  private:
  1639   inline void do_yield_check();
  1640   void do_yield_work();
  1641 };
  1643 // This closure is used to accomplish the sweeping work
  1644 // after the second checkpoint but before the concurrent reset
  1645 // phase.
  1646 //
  1647 // Terminology
  1648 //   left hand chunk (LHC) - block of one or more chunks currently being
  1649 //     coalesced.  The LHC is available for coalescing with a new chunk.
  1650 //   right hand chunk (RHC) - block that is currently being swept that is
  1651 //     free or garbage that can be coalesced with the LHC.
  1652 // _inFreeRange is true if there is currently a LHC
  1653 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
  1654 // _freeRangeInFreeLists is true if the LHC is in the free lists.
  1655 // _freeFinger is the address of the current LHC
  1656 class SweepClosure: public BlkClosureCareful {
  1657   CMSCollector*                  _collector;  // collector doing the work
  1658   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
  1659   CompactibleFreeListSpace*      _sp;   // Space being swept
  1660   HeapWord*                      _limit;
  1661   Mutex*                         _freelistLock; // Free list lock (in space)
  1662   CMSBitMap*                     _bitMap;       // Marking bit map (in
  1663                                                 // generation)
  1664   bool                           _inFreeRange;  // Indicates if we are in the
  1665                                                 // midst of a free run
  1666   bool                           _freeRangeInFreeLists;
  1667                                         // Often, we have just found
  1668                                         // a free chunk and started
  1669                                         // a new free range; we do not
  1670                                         // eagerly remove this chunk from
  1671                                         // the free lists unless there is
  1672                                         // a possibility of coalescing.
  1673                                         // When true, this flag indicates
  1674                                         // that the _freeFinger below
  1675                                         // points to a potentially free chunk
  1676                                         // that may still be in the free lists
  1677   bool                           _lastFreeRangeCoalesced;
  1678                                         // free range contains chunks
  1679                                         // coalesced
  1680   bool                           _yield;
  1681                                         // Whether sweeping should be
  1682                                         // done with yields. For instance
  1683                                         // when done by the foreground
  1684                                         // collector we shouldn't yield.
  1685   HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
  1686                                                 // pointer to the "left hand
  1687                                                 // chunk"
  1688   size_t                         _freeRangeSize;
  1689                                         // When _inFreeRange is set, this
  1690                                         // indicates the accumulated size
  1691                                         // of the "left hand chunk"
  1692   NOT_PRODUCT(
  1693     size_t                       _numObjectsFreed;
  1694     size_t                       _numWordsFreed;
  1695     size_t                       _numObjectsLive;
  1696     size_t                       _numWordsLive;
  1697     size_t                       _numObjectsAlreadyFree;
  1698     size_t                       _numWordsAlreadyFree;
  1699     FreeChunk*                   _last_fc;
  1700   )
  1701  private:
  1702   // Code that is common to a free chunk or garbage when
  1703   // encountered during sweeping.
  1704   void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
  1705                                   size_t chunkSize);
  1706   // Process a free chunk during sweeping.
  1707   void doAlreadyFreeChunk(FreeChunk *fc);
  1708   // Process a garbage chunk during sweeping.
  1709   size_t doGarbageChunk(FreeChunk *fc);
  1710   // Process a live chunk during sweeping.
  1711   size_t doLiveChunk(FreeChunk* fc);
  1713   // Accessors.
  1714   HeapWord* freeFinger() const          { return _freeFinger; }
  1715   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
  1716   size_t freeRangeSize() const          { return _freeRangeSize; }
  1717   void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
  1718   bool inFreeRange()    const           { return _inFreeRange; }
  1719   void set_inFreeRange(bool v)          { _inFreeRange = v; }
  1720   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
  1721   void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  1722   bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
  1723   void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
  1725   // Initialize a free range.
  1726   void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  1727   // Return this chunk to the free lists.
  1728   void flushCurFreeChunk(HeapWord* chunk, size_t size);
  1730   // Check if we should yield and do so when necessary.
  1731   inline void do_yield_check(HeapWord* addr);
  1733   // Yield
  1734   void do_yield_work(HeapWord* addr);
  1736   // Debugging/Printing
  1737   void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
  1739  public:
  1740   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
  1741                CMSBitMap* bitMap, bool should_yield);
  1742   ~SweepClosure();
  1744   size_t       do_blk_careful(HeapWord* addr);
  1745 };
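// A plausible reading of the state machine behind the LHC/RHC terminology
// above, phrased in terms of the private accessors declared in this class;
// the authoritative logic lives in the corresponding .cpp file.
//
//   on a free or garbage block (the RHC):
//     if (!inFreeRange())  initialize_free_range(block_start, on_free_list);
//     else                 grow the LHC by the block's size and
//                          set_lastFreeRangeCoalesced(true);
//   on a live object:
//     if (inFreeRange())   flushCurFreeChunk(freeFinger(), freeRangeSize());
//                          then set_inFreeRange(false);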
  1747 // Closures related to weak reference processing
  1749 // During CMS' weak reference processing, this is a
  1750 // work-routine/closure used to complete transitive
  1751 // marking of objects as live after a certain point
  1752 // in which an initial set has been completely accumulated.
  1753 // This closure is currently used both during the final
  1754 // remark stop-world phase, as well as during the concurrent
  1755 // precleaning of the discovered reference lists.
  1756 class CMSDrainMarkingStackClosure: public VoidClosure {
  1757   CMSCollector*        _collector;
  1758   MemRegion            _span;
  1759   CMSMarkStack*        _mark_stack;
  1760   CMSBitMap*           _bit_map;
  1761   CMSKeepAliveClosure* _keep_alive;
  1762   bool                 _concurrent_precleaning;
  1763  public:
  1764   CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
  1765                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
  1766                       CMSKeepAliveClosure* keep_alive,
  1767                       bool cpc):
  1768     _collector(collector),
  1769     _span(span),
  1770     _bit_map(bit_map),
  1771     _mark_stack(mark_stack),
  1772     _keep_alive(keep_alive),
  1773     _concurrent_precleaning(cpc) {
  1774     assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
  1775            "Mismatch");
  1778   void do_void();
  1779 };
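// A minimal illustrative sketch of how this drain closure pairs with the
// keep-alive closure during reference processing: the referent is marked and
// pushed via the keep-alive closure, and do_void() then transitively marks
// everything newly reachable from the mark stack. The helper below and its
// arguments are placeholders, and do_oop(oop*) on CMSKeepAliveClosure is
// assumed from its role as an oop closure.
#if 0  // illustrative sketch only
static void keep_alive_and_drain(CMSCollector* collector, MemRegion span,
                                 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                                 CMSKeepAliveClosure* keep_alive,
                                 oop* referent_slot, bool cpc) {
  CMSDrainMarkingStackClosure drain(collector, span, bit_map, mark_stack,
                                    keep_alive, cpc);
  keep_alive->do_oop(referent_slot);   // mark and push the referent
  drain.do_void();                     // transitively mark from the mark stack
}
#endif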
  1781 // A parallel version of CMSDrainMarkingStackClosure above.
  1782 class CMSParDrainMarkingStackClosure: public VoidClosure {
  1783   CMSCollector*           _collector;
  1784   MemRegion               _span;
  1785   OopTaskQueue*           _work_queue;
  1786   CMSBitMap*              _bit_map;
  1787   CMSInnerParMarkAndPushClosure _mark_and_push;
  1789  public:
  1790   CMSParDrainMarkingStackClosure(CMSCollector* collector,
  1791                                  MemRegion span, CMSBitMap* bit_map,
  1792                                  OopTaskQueue* work_queue):
  1793     _collector(collector),
  1794     _span(span),
  1795     _bit_map(bit_map),
  1796     _work_queue(work_queue),
  1797     _mark_and_push(collector, span, bit_map, work_queue) { }
  1799  public:
  1800   void trim_queue(uint max);
  1801   void do_void();
  1802 };
  1804 // Allow yielding or short-circuiting of reference list
  1805 // precleaning work.
  1806 class CMSPrecleanRefsYieldClosure: public YieldClosure {
  1807   CMSCollector* _collector;
  1808   void do_yield_work();
  1809  public:
  1810   CMSPrecleanRefsYieldClosure(CMSCollector* collector):
  1811     _collector(collector) {}
  1812   virtual bool should_return();
  1813 };
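// A minimal illustrative sketch of how a YieldClosure like the one above is
// polled by a long-running loop; the predicate and work unit below are
// placeholders, not the actual reference-precleaning code.
#if 0  // illustrative sketch only
static void preclean_until_asked_to_stop(CMSPrecleanRefsYieldClosure* yield_cl) {
  while (more_precleaning_work()) {    // placeholder predicate
    do_unit_of_precleaning_work();     // placeholder work unit
    if (yield_cl->should_return()) {
      break;                           // stop early; resume in a later pass
    }
  }
}
#endif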
  1816 // Convenience class that locks free list locks for given CMS collector
  1817 class FreelistLocker: public StackObj {
  1818  private:
  1819   CMSCollector* _collector;
  1820  public:
  1821   FreelistLocker(CMSCollector* collector):
  1822     _collector(collector) {
  1823     _collector->getFreelistLocks();
  1824   }
  1826   ~FreelistLocker() {
  1827     _collector->releaseFreelistLocks();
  1828   }
  1829 };
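// A minimal usage sketch: FreelistLocker is a scope-bound (RAII) helper, so the
// lock/unlock pairing follows block structure instead of being written out by
// hand. "collector" is a placeholder for a CMSCollector* in scope.
#if 0  // illustrative sketch only
static void with_free_lists_locked(CMSCollector* collector) {
  FreelistLocker fl(collector);  // constructor takes the CMS free list locks
  // ... inspect or adjust the free lists safely here ...
}                                // destructor releases them on any exit path
#endif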
  1831 // Mark all dead objects in a given space.
  1832 class MarkDeadObjectsClosure: public BlkClosure {
  1833   const CMSCollector*             _collector;
  1834   const CompactibleFreeListSpace* _sp;
  1835   CMSBitMap*                      _live_bit_map;
  1836   CMSBitMap*                      _dead_bit_map;
  1837 public:
  1838   MarkDeadObjectsClosure(const CMSCollector* collector,
  1839                          const CompactibleFreeListSpace* sp,
  1840                          CMSBitMap *live_bit_map,
  1841                          CMSBitMap *dead_bit_map) :
  1842     _collector(collector),
  1843     _sp(sp),
  1844     _live_bit_map(live_bit_map),
  1845     _dead_bit_map(dead_bit_map) {}
  1846   size_t do_blk(HeapWord* addr);
  1847 };
