src/share/vm/memory/referenceProcessor.cpp

author      stefank
date        Fri, 11 Feb 2011 14:15:16 +0100
changeset   2537:55cc33cf55bc
parent      2337:8df09fb45352
child       2651:92da084fefc9
permissions -rw-r--r--

7018257: jmm_DumpThreads allocates into permgen
Summary: Don't allocate in permgen
Reviewed-by: ysr, sla

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/javaClasses.hpp"
    27 #include "classfile/systemDictionary.hpp"
    28 #include "gc_interface/collectedHeap.hpp"
    29 #include "gc_interface/collectedHeap.inline.hpp"
    30 #include "memory/referencePolicy.hpp"
    31 #include "memory/referenceProcessor.hpp"
    32 #include "oops/oop.inline.hpp"
    33 #include "runtime/java.hpp"
    34 #include "runtime/jniHandles.hpp"
    36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
    37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
    38 oop              ReferenceProcessor::_sentinelRef = NULL;
    39 const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
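       // Note: the ReferenceType enum orders REF_OTHER, REF_SOFT, REF_WEAK,
       // REF_FINAL, REF_PHANTOM consecutively, so this difference evaluates
       // to 4 -- one list "row" each for soft, weak, final and phantom refs.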
    41 // List of discovered references.
    42 class DiscoveredList {
    43 public:
    44   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
    45   oop head() const     {
    46      return UseCompressedOops ?  oopDesc::decode_heap_oop_not_null(_compressed_head) :
    47                                 _oop_head;
    48   }
    49   HeapWord* adr_head() {
    50     return UseCompressedOops ? (HeapWord*)&_compressed_head :
    51                                (HeapWord*)&_oop_head;
    52   }
    53   void   set_head(oop o) {
    54     if (UseCompressedOops) {
    55       // Must compress the head ptr.
    56       _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    57     } else {
    58       _oop_head = o;
    59     }
    60   }
    61   bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
    62   size_t length()               { return _len; }
    63   void   set_length(size_t len) { _len = len;  }
    64   void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
    65   void   dec_length(size_t dec) { _len -= dec; }
    66 private:
    67   // Set value depending on UseCompressedOops. This could be a template class
    68   // but then we have to fix all the instantiations and declarations that use this class.
    69   oop       _oop_head;
    70   narrowOop _compressed_head;
    71   size_t _len;
    72 };
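       // A minimal usage sketch (illustrative only, not part of this file):
       // a list is "empty" when its head is the sentinel, never when it is NULL.
       //
       //   DiscoveredList dl;
       //   dl.set_head(ReferenceProcessor::sentinel_ref());  // reset the list
       //   assert(dl.empty() && dl.length() == 0, "holds only the sentinel");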
    74 void referenceProcessor_init() {
    75   ReferenceProcessor::init_statics();
    76 }
    78 void ReferenceProcessor::init_statics() {
    79   assert(_sentinelRef == NULL, "should be initialized precisely once");
    80   EXCEPTION_MARK;
    81   _sentinelRef = instanceKlass::cast(
    82                     SystemDictionary::Reference_klass())->
    83                       allocate_permanent_instance(THREAD);
    85   // Initialize the master soft ref clock.
    86   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
    88   if (HAS_PENDING_EXCEPTION) {
    89       Handle ex(THREAD, PENDING_EXCEPTION);
    90       vm_exit_during_initialization(ex);
    91   }
    92   assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
    93          "Just constructed it!");
    94   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
    95   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
    96                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
    97   if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    98     vm_exit_during_initialization("Could not allocate reference policy object");
    99   }
   100   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
   101             RefDiscoveryPolicy == ReferentBasedDiscovery,
   102             "Unrecongnized RefDiscoveryPolicy");
   103 }
   105 ReferenceProcessor*
   106 ReferenceProcessor::create_ref_processor(MemRegion          span,
   107                                          bool               atomic_discovery,
   108                                          bool               mt_discovery,
   109                                          BoolObjectClosure* is_alive_non_header,
   110                                          int                parallel_gc_threads,
   111                                          bool               mt_processing,
   112                                          bool               dl_needs_barrier) {
   113   int mt_degree = 1;
   114   if (parallel_gc_threads > 1) {
   115     mt_degree = parallel_gc_threads;
   116   }
   117   ReferenceProcessor* rp =
   118     new ReferenceProcessor(span, atomic_discovery,
   119                            mt_discovery, mt_degree,
   120                            mt_processing && (parallel_gc_threads > 0),
   121                            dl_needs_barrier);
   122   if (rp == NULL) {
   123     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   124   }
   125   rp->set_is_alive_non_header(is_alive_non_header);
   126   rp->setup_policy(false /* default soft ref policy */);
   127   return rp;
   128 }
   130 ReferenceProcessor::ReferenceProcessor(MemRegion span,
   131                                        bool      atomic_discovery,
   132                                        bool      mt_discovery,
   133                                        int       mt_degree,
   134                                        bool      mt_processing,
   135                                        bool      discovered_list_needs_barrier)  :
   136   _discovering_refs(false),
   137   _enqueuing_is_done(false),
   138   _is_alive_non_header(NULL),
   139   _discovered_list_needs_barrier(discovered_list_needs_barrier),
   140   _bs(NULL),
   141   _processing_is_mt(mt_processing),
   142   _next_id(0)
   143 {
   144   _span = span;
   145   _discovery_is_atomic = atomic_discovery;
   146   _discovery_is_mt     = mt_discovery;
   147   _num_q               = mt_degree;
   148   _max_num_q           = mt_degree;
   149   _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
   150   if (_discoveredSoftRefs == NULL) {
   151     vm_exit_during_initialization("Could not allocate RefProc Array");
   152   }
   153   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   154   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   155   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
   156   assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
   157   // Initialize all entries to _sentinelRef
   158   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   159     _discoveredSoftRefs[i].set_head(sentinel_ref());
   160     _discoveredSoftRefs[i].set_length(0);
   161   }
   162   // If we do barriers, cache a copy of the barrier set.
   163   if (discovered_list_needs_barrier) {
   164     _bs = Universe::heap()->barrier_set();
   165   }
   166 }
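       // Layout sketch (assuming the four weak subclasses): the single C-heap
       // allocation above is addressed as one flat array of _max_num_q * 4 lists,
       //
       //   _discoveredSoftRefs    -> [ soft_0    ... soft_max-1    ]
       //   _discoveredWeakRefs    -> [ weak_0    ... weak_max-1    ]
       //   _discoveredFinalRefs   -> [ final_0   ... final_max-1   ]
       //   _discoveredPhantomRefs -> [ phantom_0 ... phantom_max-1 ]
       //
       // so flat index i denotes subclass i / _max_num_q, queue i % _max_num_q.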
   168 #ifndef PRODUCT
   169 void ReferenceProcessor::verify_no_references_recorded() {
   170   guarantee(!_discovering_refs, "Discovering refs?");
   171   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   172     guarantee(_discoveredSoftRefs[i].empty(),
   173               "Found non-empty discovered list");
   174   }
   175 }
   176 #endif
   178 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   179   // Should this instead be
   180   // for (int i = 0; i < subclasses_of_ref; i++) {
   181   //   for (int j = 0; j < _num_q; j++) {
   182   //     int index = i * _max_num_q + j;
   183   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   184     if (UseCompressedOops) {
   185       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
   186     } else {
   187       f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
   188     }
   189   }
   190 }
   192 void ReferenceProcessor::oops_do(OopClosure* f) {
   193   f->do_oop(adr_sentinel_ref());
   194 }
   196 void ReferenceProcessor::update_soft_ref_master_clock() {
   197   // Update (advance) the soft ref master clock field. This must be done
   198   // after processing the soft ref list.
   199   jlong now = os::javaTimeMillis();
   200   jlong clock = java_lang_ref_SoftReference::clock();
   201   NOT_PRODUCT(
   202   if (now < clock) {
   203     warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
   204   }
   205   )
   206   // In product mode, protect ourselves from system time being adjusted
   207   // externally and going backward; see note in the implementation of
   208   // GenCollectedHeap::time_since_last_gc() for the right way to fix
   209   // this uniformly throughout the VM; see bug-id 4741166. XXX
   210   if (now > clock) {
   211     java_lang_ref_SoftReference::set_clock(now);
   212   }
   213   // Else leave clock stalled at its old value until time progresses
   214   // past clock value.
   215 }
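       // For context (a sketch; the real logic lives in referencePolicy.cpp):
       // the LRU clearing policies compare this master clock against each
       // SoftReference's timestamp field, clearing the referent only when it
       // has gone unreferenced longer than a heap-derived interval, roughly
       //
       //   jlong elapsed = clock - java_lang_ref_SoftReference::timestamp(ref);
       //   bool  clear   = elapsed > interval;  // interval scales with free heap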
   217 void ReferenceProcessor::process_discovered_references(
   218   BoolObjectClosure*           is_alive,
   219   OopClosure*                  keep_alive,
   220   VoidClosure*                 complete_gc,
   221   AbstractRefProcTaskExecutor* task_executor) {
   222   NOT_PRODUCT(verify_ok_to_handle_reflists());
   224   assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
   225   // Stop treating discovered references specially.
   226   disable_discovery();
   228   bool trace_time = PrintGCDetails && PrintReferenceGC;
   229   // Soft references
   230   {
   231     TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
   232     process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
   233                                is_alive, keep_alive, complete_gc, task_executor);
   234   }
   236   update_soft_ref_master_clock();
   238   // Weak references
   239   {
   240     TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
   241     process_discovered_reflist(_discoveredWeakRefs, NULL, true,
   242                                is_alive, keep_alive, complete_gc, task_executor);
   243   }
   245   // Final references
   246   {
   247     TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
   248     process_discovered_reflist(_discoveredFinalRefs, NULL, false,
   249                                is_alive, keep_alive, complete_gc, task_executor);
   250   }
   252   // Phantom references
   253   {
   254     TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
   255     process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
   256                                is_alive, keep_alive, complete_gc, task_executor);
   257   }
   259   // Weak global JNI references. It would make more sense (semantically) to
   260   // traverse these simultaneously with the regular weak references above, but
   261   // that is not what the JDK 1.2 specification requires. See #4126360. Native code can
   262   // thus use JNI weak references to circumvent the phantom references and
   263   // resurrect a "post-mortem" object.
   264   {
   265     TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
   266     if (task_executor != NULL) {
   267       task_executor->set_single_threaded_mode();
   268     }
   269     process_phaseJNI(is_alive, keep_alive, complete_gc);
   270   }
   271 }
   273 #ifndef PRODUCT
   274 // Calculate the number of jni handles.
   275 uint ReferenceProcessor::count_jni_refs() {
   276   class AlwaysAliveClosure: public BoolObjectClosure {
   277   public:
   278     virtual bool do_object_b(oop obj) { return true; }
   279     virtual void do_object(oop obj) { assert(false, "Don't call"); }
   280   };
   282   class CountHandleClosure: public OopClosure {
   283   private:
   284     int _count;
   285   public:
   286     CountHandleClosure(): _count(0) {}
   287     void do_oop(oop* unused)       { _count++; }
   288     void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
   289     int count() { return _count; }
   290   };
   291   CountHandleClosure global_handle_count;
   292   AlwaysAliveClosure always_alive;
   293   JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
   294   return global_handle_count.count();
   295 }
   296 #endif
   298 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
   299                                           OopClosure*        keep_alive,
   300                                           VoidClosure*       complete_gc) {
   301 #ifndef PRODUCT
   302   if (PrintGCDetails && PrintReferenceGC) {
   303     unsigned int count = count_jni_refs();
   304     gclog_or_tty->print(", %u refs", count);
   305   }
   306 #endif
   307   JNIHandles::weak_oops_do(is_alive, keep_alive);
   308   // Finally remember to keep sentinel around
   309   keep_alive->do_oop(adr_sentinel_ref());
   310   complete_gc->do_void();
   311 }
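       // The helper below is templated on T = oop or narrowOop because the
       // static pending-list field of java.lang.ref.Reference is stored as a
       // compressed slot under UseCompressedOops; the width is selected in
       // enqueue_discovered_references() further below.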
   314 template <class T>
   315 bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
   316                                    AbstractRefProcTaskExecutor* task_executor) {
   318   // Remember old value of pending references list
   319   T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
   320   T old_pending_list_value = *pending_list_addr;
   322   // Enqueue references that are not made active again, and
   323   // clear the decks for the next collection (cycle).
   324   ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
   325   // Do the oop-check on pending_list_addr missed in
   326   // enqueue_discovered_reflist. We should probably
   327   // do a raw oop_check so that future such idempotent
   328   // oop_stores relying on the oop-check side-effect
   329   // may be elided automatically and safely without
   330   // affecting correctness.
   331   oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
   333   // Stop treating discovered references specially.
   334   ref->disable_discovery();
   336   // Return true if new pending references were added
   337   return old_pending_list_value != *pending_list_addr;
   338 }
   340 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
   341   NOT_PRODUCT(verify_ok_to_handle_reflists());
   342   if (UseCompressedOops) {
   343     return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
   344   } else {
   345     return enqueue_discovered_ref_helper<oop>(this, task_executor);
   346   }
   347 }
   349 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
   350                                                     HeapWord* pending_list_addr) {
   351   // Given a list of refs linked through the "discovered" field
   352   // (java.lang.ref.Reference.discovered) chain them through the
   353   // "next" field (java.lang.ref.Reference.next) and prepend
   354   // to the pending list.
   355   if (TraceReferenceGC && PrintGCDetails) {
   356     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
   357                            INTPTR_FORMAT, (address)refs_list.head());
   358   }
   359   oop obj = refs_list.head();
   360   // Walk down the list, copying the discovered field into
   361   // the next field and clearing it (except for the last
   362   // non-sentinel object which is treated specially to avoid
   363   // confusion with an active reference).
   364   while (obj != sentinel_ref()) {
   365     assert(obj->is_instanceRef(), "should be reference object");
   366     oop next = java_lang_ref_Reference::discovered(obj);
   367     if (TraceReferenceGC && PrintGCDetails) {
   368       gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
   369                              obj, next);
   370     }
   371     assert(java_lang_ref_Reference::next(obj) == NULL,
   372            "The reference should not be enqueued");
   373     if (next == sentinel_ref()) {  // obj is last
   374       // Swap refs_list into pending_list_addr and
   375       // set obj's next to what we read from pending_list_addr.
   376       oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
   377       // Need oop_check on pending_list_addr above;
   378       // see special oop-check code at the end of
   379       // enqueue_discovered_reflists() further below.
   380       if (old == NULL) {
   381         // obj should be made to point to itself, since
   382         // pending list was empty.
   383         java_lang_ref_Reference::set_next(obj, obj);
   384       } else {
   385         java_lang_ref_Reference::set_next(obj, old);
   386       }
   387     } else {
   388       java_lang_ref_Reference::set_next(obj, next);
   389     }
   390     java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
   391     obj = next;
   392   }
   393 }
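       // Pictorially (d = discovered field, n = next field), for a
       // three-element list and prior pending-list head P:
       //
       //   before: refs_list.head -d-> r1 -d-> r2 -d-> r3 -d-> sentinel
       //   after:  pending list        r1 -n-> r2 -n-> r3 -n-> P
       //
       // with each traversed discovered field cleared; if the pending list
       // was empty, r3 is instead made to point to itself.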
   395 // Parallel enqueue task
   396 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
   397 public:
   398   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
   399                      DiscoveredList      discovered_refs[],
   400                      HeapWord*           pending_list_addr,
   401                      oop                 sentinel_ref,
   402                      int                 n_queues)
   403     : EnqueueTask(ref_processor, discovered_refs,
   404                   pending_list_addr, sentinel_ref, n_queues)
   405   { }
   407   virtual void work(unsigned int work_id) {
   408     assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
   409     // Simplest first cut: static partitioning.
   410     int index = work_id;
   411     // The increment on "index" must correspond to the maximum number of queues
   412     // (n_queues) with which that ReferenceProcessor was created.  That
   413     // is because of the "clever" way the discovered references lists were
   414     // allocated and are indexed into.  That number is ParallelGCThreads
   415     // currently.  Assert that.
   416     assert(_n_queues == (int) ParallelGCThreads, "Different number not expected");
   417     for (int j = 0;
   418          j < subclasses_of_ref;
   419          j++, index += _n_queues) {
   420       _ref_processor.enqueue_discovered_reflist(
   421         _refs_lists[index], _pending_list_addr);
   422       _refs_lists[index].set_head(_sentinel_ref);
   423       _refs_lists[index].set_length(0);
   424     }
   425   }
   426 };
   428 // Enqueue references that are not made active again
   429 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
   430   AbstractRefProcTaskExecutor* task_executor) {
   431   if (_processing_is_mt && task_executor != NULL) {
   432     // Parallel code
   433     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
   434                            pending_list_addr, sentinel_ref(), _max_num_q);
   435     task_executor->execute(tsk);
   436   } else {
   437     // Serial code: call the parent class's implementation
   438     for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   439       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
   440       _discoveredSoftRefs[i].set_head(sentinel_ref());
   441       _discoveredSoftRefs[i].set_length(0);
   442     }
   443   }
   444 }
   446 // Iterator for the list of discovered references.
   447 class DiscoveredListIterator {
   448 public:
   449   inline DiscoveredListIterator(DiscoveredList&    refs_list,
   450                                 OopClosure*        keep_alive,
   451                                 BoolObjectClosure* is_alive);
   453   // End Of List.
   454   inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
   456   // Get oop to the Reference object.
   457   inline oop obj() const { return _ref; }
   459   // Get oop to the referent object.
   460   inline oop referent() const { return _referent; }
   462   // Returns true if referent is alive.
   463   inline bool is_referent_alive() const;
   465   // Loads data for the current reference.
   466   // The "allow_null_referent" argument tells us to allow for the possibility
   467   // of a NULL referent in the discovered Reference object. This typically
   468   // happens in the case of concurrent collectors that may have done the
   469   // discovery concurrently, or interleaved, with mutator execution.
   470   inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
   472   // Move to the next discovered reference.
   473   inline void next();
   475   // Remove the current reference from the list
   476   inline void remove();
   478   // Make the Reference object active again.
   479   inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
   481   // Make the referent alive.
   482   inline void make_referent_alive() {
   483     if (UseCompressedOops) {
   484       _keep_alive->do_oop((narrowOop*)_referent_addr);
   485     } else {
   486       _keep_alive->do_oop((oop*)_referent_addr);
   487     }
   488   }
   490   // Update the discovered field.
   491   inline void update_discovered() {
   492     // First _prev_next ref actually points into DiscoveredList (gross).
   493     if (UseCompressedOops) {
   494       _keep_alive->do_oop((narrowOop*)_prev_next);
   495     } else {
   496       _keep_alive->do_oop((oop*)_prev_next);
   497     }
   498   }
   500   // NULL out referent pointer.
   501   inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
   503   // Statistics
   504   NOT_PRODUCT(
   505   inline size_t processed() const { return _processed; }
   506   inline size_t removed() const   { return _removed; }
   507   )
   509   inline void move_to_next();
   511 private:
   512   DiscoveredList&    _refs_list;
   513   HeapWord*          _prev_next;
   514   oop                _ref;
   515   HeapWord*          _discovered_addr;
   516   oop                _next;
   517   HeapWord*          _referent_addr;
   518   oop                _referent;
   519   OopClosure*        _keep_alive;
   520   BoolObjectClosure* _is_alive;
   521   DEBUG_ONLY(
   522   oop                _first_seen; // cyclic linked list check
   523   )
   524   NOT_PRODUCT(
   525   size_t             _processed;
   526   size_t             _removed;
   527   )
   528 };
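       // Traversal idiom shared by the process_phase*() methods below (sketch):
       //
       //   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
       //   while (iter.has_next()) {
       //     iter.load_ptrs(DEBUG_ONLY(allow_null_referent));
       //     if (/* this Reference should be dropped */) {
       //       iter.remove();        // unlink; _prev_next stays put
       //       iter.move_to_next();
       //     } else {
       //       iter.next();          // keep; advance _prev_next
       //     }
       //   }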
   530 inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
   531                                                       OopClosure*        keep_alive,
   532                                                       BoolObjectClosure* is_alive)
   533   : _refs_list(refs_list),
   534     _prev_next(refs_list.adr_head()),
   535     _ref(refs_list.head()),
   536 #ifdef ASSERT
   537     _first_seen(refs_list.head()),
   538 #endif
   539 #ifndef PRODUCT
   540     _processed(0),
   541     _removed(0),
   542 #endif
   543     _next(refs_list.head()),
   544     _keep_alive(keep_alive),
   545     _is_alive(is_alive)
   546 { }
   548 inline bool DiscoveredListIterator::is_referent_alive() const {
   549   return _is_alive->do_object_b(_referent);
   550 }
   552 inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   553   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   554   oop discovered = java_lang_ref_Reference::discovered(_ref);
   555   assert(_discovered_addr && discovered->is_oop_or_null(),
   556          "discovered field is bad");
   557   _next = discovered;
   558   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
   559   _referent = java_lang_ref_Reference::referent(_ref);
   560   assert(Universe::heap()->is_in_reserved_or_null(_referent),
   561          "Wrong oop found in java.lang.Reference object");
   562   assert(allow_null_referent ?
   563              _referent->is_oop_or_null()
   564            : _referent->is_oop(),
   565          "bad referent");
   566 }
   568 inline void DiscoveredListIterator::next() {
   569   _prev_next = _discovered_addr;
   570   move_to_next();
   571 }
   573 inline void DiscoveredListIterator::remove() {
   574   assert(_ref->is_oop(), "Dropping a bad reference");
   575   oop_store_raw(_discovered_addr, NULL);
   576   // First _prev_next ref actually points into DiscoveredList (gross).
   577   if (UseCompressedOops) {
   578     // Remove Reference object from list.
   579     oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
   580   } else {
   581     // Remove Reference object from list.
   582     oopDesc::store_heap_oop((oop*)_prev_next, _next);
   583   }
   584   NOT_PRODUCT(_removed++);
   585   _refs_list.dec_length(1);
   586 }
   588 inline void DiscoveredListIterator::move_to_next() {
   589   _ref = _next;
   590   assert(_ref != _first_seen, "cyclic ref_list found");
   591   NOT_PRODUCT(_processed++);
   592 }
   594 // NOTE: process_phase*() are largely similar, and at a high level
   595 // merely iterate over the extant list applying a predicate to
   596 // each of its elements and possibly removing that element from the
   597 // list and applying some further closures to that element.
   598 // We should consider the possibility of replacing these
   599 // process_phase*() methods by abstracting them into
   600 // a single general iterator invocation that receives appropriate
   601 // closures that accomplish this work.
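       // Such an abstraction might look like the following (hypothetical, not
       // implemented here): a single driver parameterized by a drop predicate
       // and a per-element action.
       //
       //   template <typename ShouldDrop, typename OnDrop>
       //   void filter_discovered_list(DiscoveredList& refs_list,
       //                               OopClosure* keep_alive,
       //                               BoolObjectClosure* is_alive,
       //                               ShouldDrop should_drop, OnDrop on_drop) {
       //     DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
       //     while (iter.has_next()) {
       //       iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
       //       if (should_drop(iter)) {
       //         on_drop(iter);
       //         iter.remove();
       //         iter.move_to_next();
       //       } else {
       //         iter.next();
       //       }
       //     }
       //   }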
   603 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
   604 // referents are not alive, but that should be kept alive for policy reasons.
   605 // Keep alive the transitive closure of all such referents.
   606 void
   607 ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
   608                                    ReferencePolicy*   policy,
   609                                    BoolObjectClosure* is_alive,
   610                                    OopClosure*        keep_alive,
   611                                    VoidClosure*       complete_gc) {
   612   assert(policy != NULL, "Must have a non-NULL policy");
   613   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   614   // Decide which softly reachable refs should be kept alive.
   615   while (iter.has_next()) {
   616     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
   617     bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
   618     if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
   619       if (TraceReferenceGC) {
   620         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
   621                                iter.obj(), iter.obj()->blueprint()->internal_name());
   622       }
   623       // Remove Reference object from list
   624       iter.remove();
   625       // Make the Reference object active again
   626       iter.make_active();
   627       // keep the referent around
   628       iter.make_referent_alive();
   629       iter.move_to_next();
   630     } else {
   631       iter.next();
   632     }
   633   }
   634   // Close the reachable set
   635   complete_gc->do_void();
   636   NOT_PRODUCT(
   637     if (PrintGCDetails && TraceReferenceGC) {
   638       gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
   639         "discovered Refs by policy  list " INTPTR_FORMAT,
   640         iter.removed(), iter.processed(), (address)refs_list.head());
   641     }
   642   )
   643 }
   645 // Traverse the list and remove any Refs that are not active, or
   646 // whose referents are either alive or NULL.
   647 void
   648 ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
   649                              BoolObjectClosure* is_alive,
   650                              OopClosure*        keep_alive) {
   651   assert(discovery_is_atomic(), "Error");
   652   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   653   while (iter.has_next()) {
   654     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
   655     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
   656     assert(next == NULL, "Should not discover inactive Reference");
   657     if (iter.is_referent_alive()) {
   658       if (TraceReferenceGC) {
   659         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
   660                                iter.obj(), iter.obj()->blueprint()->internal_name());
   661       }
   662       // The referent is reachable after all.
   663       // Remove Reference object from list.
   664       iter.remove();
   665       // Update the referent pointer as necessary: Note that this
   666       // should not entail any recursive marking because the
   667       // referent must already have been traversed.
   668       iter.make_referent_alive();
   669       iter.move_to_next();
   670     } else {
   671       iter.next();
   672     }
   673   }
   674   NOT_PRODUCT(
   675     if (PrintGCDetails && TraceReferenceGC) {
   676       gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
   677         "Refs in discovered list " INTPTR_FORMAT,
   678         iter.removed(), iter.processed(), (address)refs_list.head());
   679     }
   680   )
   681 }
   683 void
   684 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
   685                                                   BoolObjectClosure* is_alive,
   686                                                   OopClosure*        keep_alive,
   687                                                   VoidClosure*       complete_gc) {
   688   assert(!discovery_is_atomic(), "Error");
   689   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   690   while (iter.has_next()) {
   691     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
   692     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
   693     oop next = java_lang_ref_Reference::next(iter.obj());
   694     if ((iter.referent() == NULL || iter.is_referent_alive() ||
   695          next != NULL)) {
   696       assert(next->is_oop_or_null(), "bad next field");
   697       // Remove Reference object from list
   698       iter.remove();
   699       // Trace the cohorts
   700       iter.make_referent_alive();
   701       if (UseCompressedOops) {
   702         keep_alive->do_oop((narrowOop*)next_addr);
   703       } else {
   704         keep_alive->do_oop((oop*)next_addr);
   705       }
   706       iter.move_to_next();
   707     } else {
   708       iter.next();
   709     }
   710   }
   711   // Now close the newly reachable set
   712   complete_gc->do_void();
   713   NOT_PRODUCT(
   714     if (PrintGCDetails && TraceReferenceGC) {
   715       gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
   716         "Refs in discovered list " INTPTR_FORMAT,
   717         iter.removed(), iter.processed(), (address)refs_list.head());
   718     }
   719   )
   720 }
   722 // Traverse the list and process the referents, by either
   723 // clearing them or keeping them (and their reachable
   724 // closure) alive.
   725 void
   726 ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
   727                                    bool               clear_referent,
   728                                    BoolObjectClosure* is_alive,
   729                                    OopClosure*        keep_alive,
   730                                    VoidClosure*       complete_gc) {
   731   ResourceMark rm;
   732   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   733   while (iter.has_next()) {
   734     iter.update_discovered();
   735     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
   736     if (clear_referent) {
   737       // NULL out referent pointer
   738       iter.clear_referent();
   739     } else {
   740       // keep the referent around
   741       iter.make_referent_alive();
   742     }
   743     if (TraceReferenceGC) {
   744       gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
   745                              clear_referent ? "cleared " : "",
   746                              iter.obj(), iter.obj()->blueprint()->internal_name());
   747     }
   748     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
   749     iter.next();
   750   }
   751   // Remember to keep sentinel pointer around
   752   iter.update_discovered();
   753   // Close the reachable set
   754   complete_gc->do_void();
   755 }
   757 void
   758 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
   759   oop obj = refs_list.head();
   760   while (obj != sentinel_ref()) {
   761     oop discovered = java_lang_ref_Reference::discovered(obj);
   762     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
   763     obj = discovered;
   764   }
   765   refs_list.set_head(sentinel_ref());
   766   refs_list.set_length(0);
   767 }
   769 void ReferenceProcessor::abandon_partial_discovery() {
   770   // loop over the lists
   771   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
   772     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
   773       gclog_or_tty->print_cr("\nAbandoning %s discovered list",
   774                              list_name(i));
   775     }
   776     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   777   }
   778 }
   780 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
   781 public:
   782   RefProcPhase1Task(ReferenceProcessor& ref_processor,
   783                     DiscoveredList      refs_lists[],
   784                     ReferencePolicy*    policy,
   785                     bool                marks_oops_alive)
   786     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
   787       _policy(policy)
   788   { }
   789   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
   790                     OopClosure& keep_alive,
   791                     VoidClosure& complete_gc)
   792   {
   793     Thread* thr = Thread::current();
   794     int refs_list_index = ((WorkerThread*)thr)->id();
   795     _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
   796                                   &is_alive, &keep_alive, &complete_gc);
   797   }
   798 private:
   799   ReferencePolicy* _policy;
   800 };
   802 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
   803 public:
   804   RefProcPhase2Task(ReferenceProcessor& ref_processor,
   805                     DiscoveredList      refs_lists[],
   806                     bool                marks_oops_alive)
   807     : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
   808   { }
   809   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
   810                     OopClosure& keep_alive,
   811                     VoidClosure& complete_gc)
   812   {
   813     _ref_processor.process_phase2(_refs_lists[i],
   814                                   &is_alive, &keep_alive, &complete_gc);
   815   }
   816 };
   818 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
   819 public:
   820   RefProcPhase3Task(ReferenceProcessor& ref_processor,
   821                     DiscoveredList      refs_lists[],
   822                     bool                clear_referent,
   823                     bool                marks_oops_alive)
   824     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
   825       _clear_referent(clear_referent)
   826   { }
   827   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
   828                     OopClosure& keep_alive,
   829                     VoidClosure& complete_gc)
   830   {
   831     // Don't use "refs_list_index" calculated in this way because
   832     // balance_queues() has moved the Ref's into the first n queues.
   833     // Thread* thr = Thread::current();
   834     // int refs_list_index = ((WorkerThread*)thr)->id();
   835     // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
   836     _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
   837                                   &is_alive, &keep_alive, &complete_gc);
   838   }
   839 private:
   840   bool _clear_referent;
   841 };
   843 // Balances reference queues.
   844 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
   845 // queues[0, 1, ..., _num_q-1] because only the first _num_q
   846 // corresponding to the active workers will be processed.
   847 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
   848 {
   849   // calculate total length
   850   size_t total_refs = 0;
   851   if (TraceReferenceGC && PrintGCDetails) {
   852     gclog_or_tty->print_cr("\nBalance ref_lists ");
   853   }
   855   for (int i = 0; i < _max_num_q; ++i) {
   856     total_refs += ref_lists[i].length();
   857     if (TraceReferenceGC && PrintGCDetails) {
   858       gclog_or_tty->print("%d ", ref_lists[i].length());
   859     }
   860   }
   861   if (TraceReferenceGC && PrintGCDetails) {
   862     gclog_or_tty->print_cr(" = %d", total_refs);
   863   }
   864   size_t avg_refs = total_refs / _num_q + 1;
   865   int to_idx = 0;
   866   for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
   867     bool move_all = false;
   868     if (from_idx >= _num_q) {
   869       move_all = ref_lists[from_idx].length() > 0;
   870     }
   871     while ((ref_lists[from_idx].length() > avg_refs) ||
   872            move_all) {
   873       assert(to_idx < _num_q, "Sanity Check!");
   874       if (ref_lists[to_idx].length() < avg_refs) {
   875         // move superfluous refs
   876         size_t refs_to_move;
   877         // Move all the Ref's if the from queue will not be processed.
   878         if (move_all) {
   879           refs_to_move = MIN2(ref_lists[from_idx].length(),
   880                               avg_refs - ref_lists[to_idx].length());
   881         } else {
   882           refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
   883                               avg_refs - ref_lists[to_idx].length());
   884         }
   885         oop move_head = ref_lists[from_idx].head();
   886         oop move_tail = move_head;
   887         oop new_head  = move_head;
   888         // find an element to split the list on
   889         for (size_t j = 0; j < refs_to_move; ++j) {
   890           move_tail = new_head;
   891           new_head = java_lang_ref_Reference::discovered(new_head);
   892         }
   893         java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
   894         ref_lists[to_idx].set_head(move_head);
   895         ref_lists[to_idx].inc_length(refs_to_move);
   896         ref_lists[from_idx].set_head(new_head);
   897         ref_lists[from_idx].dec_length(refs_to_move);
   898         if (ref_lists[from_idx].length() == 0) {
   899           break;
   900         }
   901       } else {
   902         to_idx = (to_idx + 1) % _num_q;
   903       }
   904     }
   905   }
   906 #ifdef ASSERT
   907   size_t balanced_total_refs = 0;
   908   for (int i = 0; i < _max_num_q; ++i) {
   909     balanced_total_refs += ref_lists[i].length();
   910     if (TraceReferenceGC && PrintGCDetails) {
   911       gclog_or_tty->print("%d ", ref_lists[i].length());
   912     }
   913   }
   914   if (TraceReferenceGC && PrintGCDetails) {
   915     gclog_or_tty->print_cr(" = %d", balanced_total_refs);
   916     gclog_or_tty->flush();
   917   }
   918   assert(total_refs == balanced_total_refs, "Balancing was incomplete");
   919 #endif
   920 }
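       // Worked example (hypothetical numbers): with total_refs = 10 and
       // _num_q = 4 active queues, avg_refs = 10/4 + 1 = 3; queues holding
       // more than 3 refs donate their surplus to queues holding fewer, and
       // any queue with index >= _num_q is drained entirely (move_all), since
       // only queues [0, _num_q) will subsequently be processed.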
   922 void ReferenceProcessor::balance_all_queues() {
   923   balance_queues(_discoveredSoftRefs);
   924   balance_queues(_discoveredWeakRefs);
   925   balance_queues(_discoveredFinalRefs);
   926   balance_queues(_discoveredPhantomRefs);
   927 }
   929 void
   930 ReferenceProcessor::process_discovered_reflist(
   931   DiscoveredList               refs_lists[],
   932   ReferencePolicy*             policy,
   933   bool                         clear_referent,
   934   BoolObjectClosure*           is_alive,
   935   OopClosure*                  keep_alive,
   936   VoidClosure*                 complete_gc,
   937   AbstractRefProcTaskExecutor* task_executor)
   938 {
   939   bool mt_processing = task_executor != NULL && _processing_is_mt;
   940   // If discovery used MT and a dynamic number of GC threads, then
   941   // the queues must be balanced for correctness if fewer than the
   942   // maximum number of queues were used.  The number of queues used
   943   // during discovery may be different from the number to be used
   944   // for processing so don't depend on _num_q < _max_num_q as part
   945   // of the test.
   946   bool must_balance = _discovery_is_mt;
   948   if ((mt_processing && ParallelRefProcBalancingEnabled) ||
   949       must_balance) {
   950     balance_queues(refs_lists);
   951   }
   952   if (PrintReferenceGC && PrintGCDetails) {
   953     size_t total = 0;
   954     for (int i = 0; i < _num_q; ++i) {
   955       total += refs_lists[i].length();
   956     }
   957     gclog_or_tty->print(", %u refs", total);
   958   }
   960   // Phase 1 (soft refs only):
   961   // . Traverse the list and remove any SoftReferences whose
   962   //   referents are not alive, but that should be kept alive for
   963   //   policy reasons. Keep alive the transitive closure of all
   964   //   such referents.
   965   if (policy != NULL) {
   966     if (mt_processing) {
   967       RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
   968       task_executor->execute(phase1);
   969     } else {
   970       for (int i = 0; i < _num_q; i++) {
   971         process_phase1(refs_lists[i], policy,
   972                        is_alive, keep_alive, complete_gc);
   973       }
   974     }
   975   } else { // policy == NULL
   976     assert(refs_lists != _discoveredSoftRefs,
   977            "Policy must be specified for soft references.");
   978   }
   980   // Phase 2:
   981   // . Traverse the list and remove any refs whose referents are alive.
   982   if (mt_processing) {
   983     RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
   984     task_executor->execute(phase2);
   985   } else {
   986     for (int i = 0; i < _num_q; i++) {
   987       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
   988     }
   989   }
   991   // Phase 3:
   992   // . Traverse the list and process referents as appropriate.
   993   if (mt_processing) {
   994     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
   995     task_executor->execute(phase3);
   996   } else {
   997     for (int i = 0; i < _num_q; i++) {
   998       process_phase3(refs_lists[i], clear_referent,
   999                      is_alive, keep_alive, complete_gc);
  1000     }
  1001   }
  1002 }
  1004 void ReferenceProcessor::clean_up_discovered_references() {
  1005   // loop over the lists
  1006   // Should this instead be
  1007   // for (int i = 0; i < subclasses_of_ref; i++) {
  1008   //   for (int j = 0; j < _num_q; j++) {
  1009   //     int index = i * _max_num_q + j;
  1010   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
  1011     if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
  1012       gclog_or_tty->print_cr(
  1013         "\nScrubbing %s discovered list of Null referents",
  1014         list_name(i));
  1015     }
  1016     clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  1017   }
  1018 }
  1020 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  1021   assert(!discovery_is_atomic(), "Else why call this method?");
  1022   DiscoveredListIterator iter(refs_list, NULL, NULL);
  1023   while (iter.has_next()) {
  1024     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  1025     oop next = java_lang_ref_Reference::next(iter.obj());
  1026     assert(next->is_oop_or_null(), "bad next field");
  1027     // If referent has been cleared or Reference is not active,
  1028     // drop it.
  1029     if (iter.referent() == NULL || next != NULL) {
  1030       debug_only(
  1031         if (PrintGCDetails && TraceReferenceGC) {
  1032           gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
  1033             INTPTR_FORMAT " with next field: " INTPTR_FORMAT
  1034             " and referent: " INTPTR_FORMAT,
  1035             iter.obj(), next, iter.referent());
  1036         }
  1037       )
  1038       // Remove Reference object from list
  1039       iter.remove();
  1040       iter.move_to_next();
  1041     } else {
  1042       iter.next();
  1043     }
  1044   }
  1045   NOT_PRODUCT(
  1046     if (PrintGCDetails && TraceReferenceGC) {
  1047       gclog_or_tty->print(
  1048         " Removed %d Refs with NULL referents out of %d discovered Refs",
  1049         iter.removed(), iter.processed());
  1050     }
  1051   )
  1052 }
  1054 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  1055   int id = 0;
  1056   // Determine the queue index to use for this object.
  1057   if (_discovery_is_mt) {
  1058     // During a multi-threaded discovery phase,
  1059     // each thread saves to its "own" list.
  1060     Thread* thr = Thread::current();
  1061     id = thr->as_Worker_thread()->id();
  1062   } else {
  1063     // single-threaded discovery, we save in round-robin
  1064     // fashion to each of the lists.
  1065     if (_processing_is_mt) {
  1066       id = next_id();
  1067     }
  1068   }
  1069   assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");
  1071   // Get the discovered queue to which we will add
  1072   DiscoveredList* list = NULL;
  1073   switch (rt) {
  1074     case REF_OTHER:
  1075       // Unknown reference type, no special treatment
  1076       break;
  1077     case REF_SOFT:
  1078       list = &_discoveredSoftRefs[id];
  1079       break;
  1080     case REF_WEAK:
  1081       list = &_discoveredWeakRefs[id];
  1082       break;
  1083     case REF_FINAL:
  1084       list = &_discoveredFinalRefs[id];
  1085       break;
  1086     case REF_PHANTOM:
  1087       list = &_discoveredPhantomRefs[id];
  1088       break;
  1089     case REF_NONE:
  1090       // we should not reach here if we are an instanceRefKlass
  1091     default:
  1092       ShouldNotReachHere();
  1093   }
  1094   if (TraceReferenceGC && PrintGCDetails) {
  1095     gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  1096   }
  1097   return list;
  1098 }
  1100 inline void
  1101 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
  1102                                               oop             obj,
  1103                                               HeapWord*       discovered_addr) {
  1104   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  1105   // First we must make sure this object is only enqueued once: CAS a
  1106   // non-NULL value into the discovered field.
  1107   oop current_head = refs_list.head();
  1109   // Note: In the case of G1, this specific pre-barrier is strictly
  1110   // not necessary because the only case we are interested in
  1111   // here is when *discovered_addr is NULL (see the CAS further below),
  1112   // so this will expand to nothing. As a result, we have manually
  1113   // elided this out for G1, but left in the test for some future
  1114   // collector that might have need for a pre-barrier here.
  1115   if (_discovered_list_needs_barrier && !UseG1GC) {
  1116     if (UseCompressedOops) {
  1117       _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
  1118     } else {
  1119       _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
  1120     }
  1121     guarantee(false, "Need to check non-G1 collector");
  1122   }
  1123   oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
  1124                                                     NULL);
  1125   if (retest == NULL) {
  1126     // This thread just won the right to enqueue the object.
  1127     // We have separate lists for enqueueing so no synchronization
  1128     // is necessary.
  1129     refs_list.set_head(obj);
  1130     refs_list.inc_length(1);
  1131     if (_discovered_list_needs_barrier) {
  1132       _bs->write_ref_field((void*)discovered_addr, current_head);
  1133     }
  1135     if (TraceReferenceGC) {
  1136       gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
  1137                              obj, obj->blueprint()->internal_name());
  1138     }
  1139   } else {
  1140     // If retest was non NULL, another thread beat us to it:
  1141     // The reference has already been discovered...
  1142     if (TraceReferenceGC) {
  1143       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
  1144                              obj, obj->blueprint()->internal_name());
  1145     }
  1146   }
}
  1149 #ifndef PRODUCT
  1150 // Non-atomic (i.e. concurrent) discovery might allow us
  1151 // to observe j.l.References with NULL referents, namely those
  1152 // cleared concurrently by mutators during (or after) discovery.
  1153 void ReferenceProcessor::verify_referent(oop obj) {
  1154   bool da = discovery_is_atomic();
  1155   oop referent = java_lang_ref_Reference::referent(obj);
  1156   assert(da ? referent->is_oop() : referent->is_oop_or_null(),
  1157          err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
  1158                  INTPTR_FORMAT " during %satomic discovery ",
  1159                  (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
  1160 }
  1161 #endif
  1163 // We mention two of several possible choices here:
  1164 // #0: if the reference object is not in the "originating generation"
  1165 //     (or part of the heap being collected, indicated by our "span")
  1166 //     we don't treat it specially (i.e. we scan it as we would
  1167 //     a normal oop, treating its references as strong references).
  1168 //     This means that references can't be enqueued unless their
  1169 //     referent is also in the same span. This is the simplest,
  1170 //     most "local" and most conservative approach, albeit one
  1171 //     that may cause weak references to be enqueued least promptly.
  1172 //     We call this choice the "ReferenceBasedDiscovery" policy.
  1173 // #1: the reference object may be in any generation (span), but if
  1174 //     the referent is in the generation (span) being currently collected
  1175 //     then we can discover the reference object, provided
  1176 //     the object has not already been discovered by
  1177 //     a different concurrently running collector (as may be the
  1178 //     case, for instance, if the reference object is in CMS and
  1179 //     the referent in DefNewGeneration), and provided the processing
  1180 //     of this reference object by the current collector will
  1181 //     appear atomic to every other collector in the system.
  1182 //     (Thus, for instance, a concurrent collector may not
  1183 //     discover references in other generations even if the
  1184 //     referent is in its own generation). This policy may,
  1185 //     in certain cases, enqueue references somewhat sooner than
  1186 //     might Policy #0 above, but at marginally increased cost
  1187 //     and complexity in processing these references.
  1188 //     We call this choice the "RefeferentBasedDiscovery" policy.
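       // Concrete illustration (hypothetical setup): during a young-generation
       // collection whose span is the young generation, a Reference living in
       // the old generation with a referent in the young generation is skipped
       // under policy #0 (the Reference itself is outside the span) but may be
       // discovered under policy #1 (its referent is inside the span).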
  1189 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  1190   // We enqueue references only if we are discovering refs
  1191   // (rather than processing discovered refs).
  1192   if (!_discovering_refs || !RegisterReferences) {
  1193     return false;
  1194   }
  1195   // We only enqueue active references.
  1196   oop next = java_lang_ref_Reference::next(obj);
  1197   if (next != NULL) {
  1198     return false;
  1199   }
  1201   HeapWord* obj_addr = (HeapWord*)obj;
  1202   if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
  1203       !_span.contains(obj_addr)) {
  1204     // Reference is not in the originating generation;
  1205     // don't treat it specially (i.e. we want to scan it as a normal
  1206     // object with strong references).
  1207     return false;
  1208   }
  1210   // We only enqueue references whose referents are not (yet) strongly
  1211   // reachable.
  1212   if (is_alive_non_header() != NULL) {
  1213     verify_referent(obj);
  1214     if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
  1215       return false;  // referent is reachable
  1216     }
  1217   }
  1218   if (rt == REF_SOFT) {
  1219     // For soft refs we can decide now if these are not
  1220     // current candidates for clearing, in which case we
  1221     // can mark through them now, rather than delaying that
  1222     // to the reference-processing phase. Since all current
  1223     // time-stamp policies advance the soft-ref clock only
  1224     // at a major collection cycle, this is always currently
  1225     // accurate.
  1226     if (!_current_soft_ref_policy->should_clear_reference(obj)) {
  1227       return false;
  1228     }
  1229   }
  1231   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  1232   const oop  discovered = java_lang_ref_Reference::discovered(obj);
  1233   assert(discovered->is_oop_or_null(), "bad discovered field");
  1234   if (discovered != NULL) {
  1235     // The reference has already been discovered...
  1236     if (TraceReferenceGC) {
  1237       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
  1238                              obj, obj->blueprint()->internal_name());
  1239     }
  1240     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
  1241       // assumes that an object is not processed twice;
  1242       // if it's been already discovered it must be on another
  1243       // generation's discovered list; so we won't discover it.
  1244       return false;
  1245     } else {
  1246       assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
  1247              "Unrecognized policy");
  1248       // Check assumption that an object is not potentially
  1249       // discovered twice except by concurrent collectors that potentially
  1250       // trace the same Reference object twice.
  1251       assert(UseConcMarkSweepGC || UseG1GC,
  1252              "Only possible with a concurrent marking collector");
  1253       return true;
  1254     }
  1255   }
  1257   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
  1258     verify_referent(obj);
  1259     // enqueue if and only if either:
  1260     // reference is in our span or
  1261     // we are an atomic collector and referent is in our span
  1262     if (_span.contains(obj_addr) ||
  1263         (discovery_is_atomic() &&
  1264          _span.contains(java_lang_ref_Reference::referent(obj)))) {
  1265       // should_enqueue = true;
  1266     } else {
  1267       return false;
  1268     }
  1269   } else {
  1270     assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
  1271            _span.contains(obj_addr), "code inconsistency");
  1272   }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }
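  // (get_discovered_list(), defined earlier in this file, returns NULL for
  // reference types that need no special treatment, e.g. REF_OTHER.)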

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
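    // add_to_discovered_list_mt(), defined earlier in this file, claims the
    // Reference by atomically CAS-ing a non-NULL value into its discovered
    // field, so a Reference traced by several worker threads concurrently
    // is pushed onto exactly one discovered list.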
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list.  Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }

  assert(obj->is_oop(), "Enqueued a bad reference");
  verify_referent(obj);
  return true;
}
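
// A true return value indicates that obj has been captured on a discovered
// list and its referent should not be treated as strongly reachable during
// this trace; false means the Reference gets no special treatment and is
// scanned like a normal object.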

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  bool should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = (ClassUnloading && !UseConcMarkSweepGC) ||
                               (CMSClassUnloadingEnabled && UseConcMarkSweepGC) ||
                               (ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                UseConcMarkSweepGC && should_unload_classes);
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
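
  // (TraceTime is an RAII timer scoped to each block, so it reports its
  // elapsed time when the block exits, including via the early returns
  // above. The same pattern repeats for the remaining three subclasses.)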

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL, or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
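  // A Reference is dropped from the list in three cases: its referent has
  // been cleared (NULL), its referent is still alive, or the Reference is
  // no longer active (non-NULL next field). In each case the referent and
  // next fields are kept alive below, so precleaning never hides an object
  // from the concurrent marker.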
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _max_num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _max_num_q;
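  // Example: with _max_num_q == 8, indices 0..7 name the "SoftRef" queues,
  // 8..15 "WeakRef", 16..23 "FinalRef", and 24..31 "PhantomRef".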
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
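  // Walk every discovered list, severing the discovered links as we go;
  // each chain terminates at the sentinel. Afterwards, reset each list to
  // its empty state: sentinel head and zero length. (The loop bound of
  // _max_num_q * subclasses_of_ref works because the four subclass arrays
  // are laid out contiguously, starting at _discoveredSoftRefs.)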
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT
