src/share/vm/memory/referenceProcessor.hpp

author:      ysr
date:        Wed, 07 Sep 2011 13:55:42 -0700
changeset:   3117:eca1193ca245
parent:      3115:c2bf0120ee5d
child:       3175:4dfb2df418f2
permissions: -rw-r--r--
4965777: GC changes to support use of discovered field for pending references
Summary: If and when the reference handler thread is able to use the discovered field to link reference objects in its pending list, GC will do so as well. In that case, GC scans through the discovered field only after a reference object has been placed on the pending list, and not before, since until then the field is used by the concurrent GC thread to link discovered objects. When the ReferenceHandler thread does not use the discovered field to link the elements of the pending list, as is the case in older JDKs, the JVM falls back to the old behaviour of using the next field for that purpose.
Reviewed-by: jcoomes, mchung, stefank
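
Below is a minimal sketch of the scheme described in the summary, using hypothetical helper names (set_discovered, set_next); the actual logic lives in referenceProcessor.cpp:

    // Sketch only; assumes simplified oop field accessors, not the real code.
    void link_into_pending_list(oop ref, oop old_pending_head) {
      if (ReferenceProcessor::pending_list_uses_discovered_field()) {
        // Post-4965777: the ReferenceHandler thread traverses the discovered
        // field, so GC chains pending entries through it.
        set_discovered(ref, old_pending_head);
      } else {
        // Pre-4965777 fallback: chain through the next field, as older
        // ReferenceHandler implementations expect.
        set_next(ref, old_pending_head);
      }
    }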

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "oops/instanceRefKlass.hpp"

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;

class ReferenceProcessor : public CHeapObj {
 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;
  MemRegion   _span; // (right-open) interval of heap
                     // subject to wkref discovery
  bool        _discovering_refs;      // true when discovery enabled
  bool        _discovery_is_atomic;   // if discovery is atomic wrt
                                      // other collectors in configuration
  bool        _discovery_is_mt;       // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                    // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;     // true if all weak references enqueued
  bool        _processing_is_mt;      // true during phases when
                                      // reference processing is MT.
  int         _next_id;               // round-robin mod _num_q counter in
                                      // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  int max_num_q()                        { return _max_num_q; }
  void set_active_mt_degree(int v)       { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
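
  // Note (illustrative): collectors typically call setup_policy() once at
  // the start of a collection's reference processing so that the soft-ref
  // clearing threshold is snapshotted consistently for the whole cycle.
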
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.  should_unload_classes is
  // used to aid assertion checking when classes are collected.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      bool               should_unload_classes);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);
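
  // Illustrative sketch (hypothetical helper, not part of this class):
  // the "active" test described above amounts to
  //   bool is_active(oop ref) {
  //     return java_lang_ref_Reference::next(ref) == NULL;
  //   }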

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
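
  // Illustration: with _num_q == 3, successive calls to next_id() return
  // 0, 1, 2, 0, 1, ..., distributing discovered references round-robin
  // across the active queues.
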
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, int mt_processing_degree = 1,
                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()   { _discovering_refs = true;  }
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // Whether the JDK in which we are embedded uses the discovered field
  // to chain the entries in the pending list; returns false for
  // pre-4965777 JDKs, which use the next field instead.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields.  Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery();
    }
  }
};
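
// Hypothetical usage sketch (names invented for illustration):
//
//   {
//     NoRefDiscovery no_discovery(ref_processor);
//     // ... work during which reference discovery must stay disabled ...
//   }  // prior discovery state is restored when no_discovery goes out of scope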

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};

// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool  _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};
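
// Hypothetical usage sketch for the mutator family above (names invented
// for illustration); each mutator saves the current setting on entry and
// restores it on exit:
//
//   {
//     ReferenceProcessorSpanMutator   span_mutator(rp, collection_span);
//     ReferenceProcessorMTProcMutator mt_mutator(rp, true);
//     // ... process references over collection_span with MT processing ...
//   }  // the original span and MT'ness are restored here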

// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
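
// Hypothetical implementation sketch (invented names, for illustration
// only): a collector-specific executor typically dispatches task.work(i, ...)
// on each of its worker threads, with i identifying the queue to process:
//
//   class MyRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
//     virtual void execute(ProcessTask& task) {
//       // for each worker i in [0, active degree):
//       //   task.work(i, *is_alive_i, *keep_alive_i, *complete_gc_i);
//     }
//     virtual void execute(EnqueueTask& task) {
//       // for each worker i: task.work(i);
//     }
//   };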

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
