src/share/vm/memory/referenceProcessor.hpp

author      ysr
date        Mon, 23 Jun 2008 16:49:37 -0700
changeset   782:60fb9c4db4e6
parent      777:37f87013dfd8
child       791:1ee8caae33af
permissions -rw-r--r--

6718086: CMS assert: _concurrent_iteration_safe_limit update missed
Summary: Initialize the field correctly in ContiguousSpace's constructor and initialize() methods, using the latter for the survivor spaces upon initial construction or a subsequent resizing of the young generation. Add some missing Space sub-class constructors.
Reviewed-by: apetrusenko

/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ReferenceProcessor class encapsulates the per-"collector" processing
// of "weak" references for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;

class ReferenceProcessor : public CHeapObj {
 protected:
  // End of list marker
  static oop  _sentinelRef;
  MemRegion   _span; // (right-open) interval of heap
                     // subject to wkref discovery
  bool        _discovering_refs;      // true when discovery enabled
  bool        _discovery_is_atomic;   // if discovery is atomic wrt
                                      // other collectors in configuration
  bool        _discovery_is_mt;       // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;     // true if all weak references enqueued
  bool        _processing_is_mt;      // true during phases when
                                      // reference processing is MT.
  int         _next_id;               // round-robin counter in
                                      // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // The discovered ref lists themselves

  // The MT'ness degree of the queues below
  int             _num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop  sentinel_ref()             { return _sentinelRef; }
  static oop* adr_sentinel_ref()         { return &_sentinelRef; }

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
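
  // Editorial summary (not in the original source): for a given reachability
  // level, process_discovered_reflist() runs the phases above in order --
  // phase1 keeps policy-protected referents alive (policy is typically
  // non-NULL only for soft references), phase2 drops references whose
  // referents are still reachable, and phase3 clears or keeps what remains,
  // as directed by clear_referent.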

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The is_alive argument is a predicate on an oop that indicates
  // its (strong) reachability, and yield is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield);

  // Delete entries in the discovered lists that either
  // have a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying slot i (of _num_q).
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

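  // Editorial note (added): next_id() below deals out queue indices
  // round-robin in support of work distribution; e.g., with _num_q == 4,
  // successive calls return 0, 1, 2, 3, 0, 1, ...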
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of JNI handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  {}

  ReferenceProcessor(MemRegion span, bool atomic_discovery,
                     bool mt_discovery,
                     int mt_degree = 1,
                     bool mt_processing = false,
                     bool discovered_list_needs_barrier = false);

  // Allocates and initializes a reference processor.
  static ReferenceProcessor* create_ref_processor(
    MemRegion          span,
    bool               atomic_discovery,
    bool               mt_discovery,
    BoolObjectClosure* is_alive_non_header = NULL,
    int                parallel_gc_threads = 1,
    bool               mt_processing = false,
    bool               discovered_list_needs_barrier = false);
  // RefDiscoveryPolicy values
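  // (Editorial note -- assumed semantics, not spelled out in this header:
  // ReferenceBasedDiscovery discovers a Reference when the Reference object
  // itself lies in this processor's span, while ReferentBasedDiscovery also
  // takes the location of the referent into account.)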
  enum {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()   { _discovering_refs = true;  }
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots
  static void oops_do(OopClosure* f);     // strong root(s)

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(ReferencePolicy*             policy,
                                     BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery phase is in progress when it is superseded, abandon it:
  // all the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields.  Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  static void verify();

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};
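
// Editorial usage sketch (added; the calls are from the class above, but the
// ordering shown is an assumption about typical collector use, not a
// contract stated in this header):
//
//   rp->enable_discovery();
//   // ... during marking, the collector calls rp->discover_reference(obj, rt)
//   // for each Reference object it encounters in the span ...
//   rp->process_discovered_references(policy, &is_alive, &keep_alive,
//                                     &complete_gc, task_executor);
//   rp->enqueue_discovered_references(task_executor);
//   rp->disable_discovery();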

// A utility class to disable reference discovery in
// the scope that contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    // Remember the prior state, then disable discovery if it was enabled.
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery();
    }
  }
};
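
// Editorial example (added): typical scoped use of NoRefDiscovery --
//
//   {
//     NoRefDiscovery no_discovery(rp);   // discovery off (if it was on)
//     ... do work that must not discover references ...
//   }                                    // prior state restored here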

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
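
// Editorial note (added): the mutators below all follow the same
// save-in-constructor / restore-in-destructor (RAII) pattern for other
// ReferenceProcessor settings, e.g.:
//
//   {
//     ReferenceProcessorSpanMutator x(rp, MemRegion(lo, hi));
//     ... process references discovered only in [lo, hi) ...
//   }   // original span restored here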

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
                              bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool  _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { }
};
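
// Editorial note (added -- an assumption about intended use, inferred from
// the task interfaces below): a concrete executor's execute() is expected to
// run task.work(id, ...) once per worker, with id identifying the
// discovered-list slot that worker should handle.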

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              oop                 sentinel_ref,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _sentinel_ref(sentinel_ref),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  oop                 _sentinel_ref;
  int                 _n_queues;
};
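
// Editorial sketch (added; hypothetical subclass -- the class name and the
// choice of phase are invented for illustration, not part of this header):
//
//   class ExamplePhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
//    public:
//     ExamplePhase2Task(ReferenceProcessor& rp, DiscoveredList lists[])
//       : ProcessTask(rp, lists, false /* marks_oops_alive */) { }
//     virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       // Each worker processes the discovered list in its own slot.
//       _ref_processor.process_phase2(_refs_lists[work_id],
//                                     &is_alive, &keep_alive, &complete_gc);
//     }
//   };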
