src/share/vm/memory/referenceProcessor.cpp

author        ysr
date          Thu, 20 Nov 2008 12:27:41 -0800
changeset     887:00b023ae2d78
parent        791:1ee8caae33af
child         888:c96030fff130
permissions   -rw-r--r--

6722113: CMS: Incorrect overflow handling during precleaning of Reference lists
Summary: When marking stack overflow occurred during precleaning of Reference lists, we were using the overflow list mechanism, which can cause problems because it mutates the mark word in the object header and can conflict with concurrent mutator accesses and updates of that field. Instead we should use the usual overflow-handling mechanism for concurrent phases, namely dirtying the card on which the overflowed object lies. Since precleaning effectively does a form of discovered list processing, albeit with discovery still enabled, some code needed adjusting to remain correct in the face of interleaved processing and discovery.
Reviewed-by: apetrusenko, jcoomes
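
The fix described above replaces overflow-list handling (which links objects through their mark words) with card dirtying. The sketch below is a minimal, self-contained model of that idea, not HotSpot code: heap, card_table, and handle_mark_stack_overflow are hypothetical names, and 512-byte cards are an assumption. An object that cannot be pushed onto the marking stack is remembered by dirtying the card spanning its address, so a later concurrent pass rescans that card instead of requiring any mutation of the object's header.

// Minimal model of "dirty the card on overflow"; illustrative only, not HotSpot APIs.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static const size_t kCardShift = 9;            // assume 512-byte cards
static const size_t kHeapBytes = 1 << 20;      // pretend 1 MB heap

static uint8_t heap[kHeapBytes];               // stand-in for the collected heap
static std::vector<uint8_t> card_table(kHeapBytes >> kCardShift, 0 /* clean */);

// Instead of pushing the overflowed object onto an overflow list (which would
// mutate its header), record it by dirtying the card it lies on; the next
// precleaning/remark pass rescans dirty cards and re-marks from them.
static void handle_mark_stack_overflow(const void* obj) {
  size_t offset = (const uint8_t*)obj - heap;
  card_table[offset >> kCardShift] = 1;        // 1 == dirty
}

int main() {
  handle_mark_stack_overflow(heap + 70000);    // pretend this object overflowed the stack
  for (size_t i = 0; i < card_table.size(); i++) {
    if (card_table[i]) printf("card %zu is dirty and will be rescanned\n", i);
  }
  return 0;
}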

     1 /*
     2  * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_referenceProcessor.cpp.incl"
    28 // List of discovered references.
    29 class DiscoveredList {
    30 public:
    31   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
    32   oop head() const     {
    33      return UseCompressedOops ?  oopDesc::decode_heap_oop_not_null(_compressed_head) :
    34                                 _oop_head;
    35   }
    36   HeapWord* adr_head() {
    37     return UseCompressedOops ? (HeapWord*)&_compressed_head :
    38                                (HeapWord*)&_oop_head;
    39   }
    40   void   set_head(oop o) {
    41     if (UseCompressedOops) {
    42       // Must compress the head ptr.
    43       _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    44     } else {
    45       _oop_head = o;
    46     }
    47   }
    48   bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
    49   size_t length()               { return _len; }
    50   void   set_length(size_t len) { _len = len;  }
    51   void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
    52   void   dec_length(size_t dec) { _len -= dec; }
    53 private:
    54   // Set value depending on UseCompressedOops. This could be a template class
    55   // but then we have to fix all the instantiations and declarations that use this class.
    56   oop       _oop_head;
    57   narrowOop _compressed_head;
    58   size_t _len;
    59 };
    61 oop  ReferenceProcessor::_sentinelRef = NULL;
    63 const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
    65 void referenceProcessor_init() {
    66   ReferenceProcessor::init_statics();
    67 }
    69 void ReferenceProcessor::init_statics() {
    70   assert(_sentinelRef == NULL, "should be initialized precisely once");
    71   EXCEPTION_MARK;
    72   _sentinelRef = instanceKlass::cast(
    73                     SystemDictionary::reference_klass())->
    74                       allocate_permanent_instance(THREAD);
    76   // Initialize the master soft ref clock.
    77   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
    79   if (HAS_PENDING_EXCEPTION) {
    80       Handle ex(THREAD, PENDING_EXCEPTION);
    81       vm_exit_during_initialization(ex);
    82   }
    83   assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
    84          "Just constructed it!");
    85   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
    86             RefDiscoveryPolicy == ReferentBasedDiscovery,
    87             "Unrecognized RefDiscoveryPolicy");
    88 }
    90 ReferenceProcessor*
    91 ReferenceProcessor::create_ref_processor(MemRegion          span,
    92                                          bool               atomic_discovery,
    93                                          bool               mt_discovery,
    94                                          BoolObjectClosure* is_alive_non_header,
    95                                          int                parallel_gc_threads,
    96                                          bool               mt_processing,
    97                                          bool               dl_needs_barrier) {
    98   int mt_degree = 1;
    99   if (parallel_gc_threads > 1) {
   100     mt_degree = parallel_gc_threads;
   101   }
   102   ReferenceProcessor* rp =
   103     new ReferenceProcessor(span, atomic_discovery,
   104                            mt_discovery, mt_degree,
   105                            mt_processing && (parallel_gc_threads > 0),
   106                            dl_needs_barrier);
   107   if (rp == NULL) {
   108     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   109   }
   110   rp->set_is_alive_non_header(is_alive_non_header);
   111   return rp;
   112 }
   114 ReferenceProcessor::ReferenceProcessor(MemRegion span,
   115                                        bool      atomic_discovery,
   116                                        bool      mt_discovery,
   117                                        int       mt_degree,
   118                                        bool      mt_processing,
   119                                        bool      discovered_list_needs_barrier)  :
   120   _discovering_refs(false),
   121   _enqueuing_is_done(false),
   122   _is_alive_non_header(NULL),
   123   _discovered_list_needs_barrier(discovered_list_needs_barrier),
   124   _bs(NULL),
   125   _processing_is_mt(mt_processing),
   126   _next_id(0)
   127 {
   128   _span = span;
   129   _discovery_is_atomic = atomic_discovery;
   130   _discovery_is_mt     = mt_discovery;
   131   _num_q               = mt_degree;
   132   _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
   133   if (_discoveredSoftRefs == NULL) {
   134     vm_exit_during_initialization("Could not allocate RefProc Array");
   135   }
   136   _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
   137   _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
   138   _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
   139   assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
   140   // Initialize all entries to _sentinelRef
   141   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
   142     _discoveredSoftRefs[i].set_head(sentinel_ref());
   143     _discoveredSoftRefs[i].set_length(0);
   144   }
   145   // If we do barriers, cache a copy of the barrier set.
   146   if (discovered_list_needs_barrier) {
   147     _bs = Universe::heap()->barrier_set();
   148   }
   149 }
   151 #ifndef PRODUCT
   152 void ReferenceProcessor::verify_no_references_recorded() {
   153   guarantee(!_discovering_refs, "Discovering refs?");
   154   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
   155     guarantee(_discoveredSoftRefs[i].empty(),
   156               "Found non-empty discovered list");
   157   }
   158 }
   159 #endif
   161 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   162   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
   163     if (UseCompressedOops) {
   164       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
   165     } else {
   166       f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
   167     }
   168   }
   169 }
   171 void ReferenceProcessor::oops_do(OopClosure* f) {
   172   f->do_oop(adr_sentinel_ref());
   173 }
   175 void ReferenceProcessor::update_soft_ref_master_clock() {
   176   // Update (advance) the soft ref master clock field. This must be done
   177   // after processing the soft ref list.
   178   jlong now = os::javaTimeMillis();
   179   jlong clock = java_lang_ref_SoftReference::clock();
   180   NOT_PRODUCT(
   181   if (now < clock) {
   182     warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
   183   }
   184   )
   185   // In product mode, protect ourselves from system time being adjusted
   186   // externally and going backward; see note in the implementation of
   187   // GenCollectedHeap::time_since_last_gc() for the right way to fix
   188   // this uniformly throughout the VM; see bug-id 4741166. XXX
   189   if (now > clock) {
   190     java_lang_ref_SoftReference::set_clock(now);
   191   }
   192   // Else leave clock stalled at its old value until time progresses
   193   // past clock value.
   194 }
   196 void ReferenceProcessor::process_discovered_references(
   197   ReferencePolicy*             policy,
   198   BoolObjectClosure*           is_alive,
   199   OopClosure*                  keep_alive,
   200   VoidClosure*                 complete_gc,
   201   AbstractRefProcTaskExecutor* task_executor) {
   202   NOT_PRODUCT(verify_ok_to_handle_reflists());
   204   assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
   205   // Stop treating discovered references specially.
   206   disable_discovery();
   208   bool trace_time = PrintGCDetails && PrintReferenceGC;
   209   // Soft references
   210   {
   211     TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
   212     process_discovered_reflist(_discoveredSoftRefs, policy, true,
   213                                is_alive, keep_alive, complete_gc, task_executor);
   214   }
   216   update_soft_ref_master_clock();
   218   // Weak references
   219   {
   220     TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
   221     process_discovered_reflist(_discoveredWeakRefs, NULL, true,
   222                                is_alive, keep_alive, complete_gc, task_executor);
   223   }
   225   // Final references
   226   {
   227     TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
   228     process_discovered_reflist(_discoveredFinalRefs, NULL, false,
   229                                is_alive, keep_alive, complete_gc, task_executor);
   230   }
   232   // Phantom references
   233   {
   234     TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
   235     process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
   236                                is_alive, keep_alive, complete_gc, task_executor);
   237   }
   239   // Weak global JNI references. It would make more sense (semantically) to
   240   // traverse these simultaneously with the regular weak references above, but
   241 // that is not what the JDK 1.2 specification requires; see #4126360. Native code can
   242   // thus use JNI weak references to circumvent the phantom references and
   243   // resurrect a "post-mortem" object.
   244   {
   245     TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
   246     if (task_executor != NULL) {
   247       task_executor->set_single_threaded_mode();
   248     }
   249     process_phaseJNI(is_alive, keep_alive, complete_gc);
   250   }
   251 }
   253 #ifndef PRODUCT
   254 // Calculate the number of jni handles.
   255 uint ReferenceProcessor::count_jni_refs() {
   256   class AlwaysAliveClosure: public BoolObjectClosure {
   257   public:
   258     virtual bool do_object_b(oop obj) { return true; }
   259     virtual void do_object(oop obj) { assert(false, "Don't call"); }
   260   };
   262   class CountHandleClosure: public OopClosure {
   263   private:
   264     int _count;
   265   public:
   266     CountHandleClosure(): _count(0) {}
   267     void do_oop(oop* unused)       { _count++; }
   268     void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
   269     int count() { return _count; }
   270   };
   271   CountHandleClosure global_handle_count;
   272   AlwaysAliveClosure always_alive;
   273   JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
   274   return global_handle_count.count();
   275 }
   276 #endif
   278 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
   279                                           OopClosure*        keep_alive,
   280                                           VoidClosure*       complete_gc) {
   281 #ifndef PRODUCT
   282   if (PrintGCDetails && PrintReferenceGC) {
   283     unsigned int count = count_jni_refs();
   284     gclog_or_tty->print(", %u refs", count);
   285   }
   286 #endif
   287   JNIHandles::weak_oops_do(is_alive, keep_alive);
   288   // Finally remember to keep sentinel around
   289   keep_alive->do_oop(adr_sentinel_ref());
   290   complete_gc->do_void();
   291 }
   294 template <class T>
   295 static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
   296                                           AbstractRefProcTaskExecutor* task_executor) {
   298   // Remember old value of pending references list
   299   T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
   300   T old_pending_list_value = *pending_list_addr;
   302   // Enqueue references that are not made active again, and
   303   // clear the decks for the next collection (cycle).
   304   ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
   305   // Do the oop-check on pending_list_addr missed in
   306   // enqueue_discovered_reflist. We should probably
   307   // do a raw oop_check so that future such idempotent
   308   // oop_stores relying on the oop-check side-effect
   309   // may be elided automatically and safely without
   310   // affecting correctness.
   311   oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
   313   // Stop treating discovered references specially.
   314   ref->disable_discovery();
   316   // Return true if new pending references were added
   317   return old_pending_list_value != *pending_list_addr;
   318 }
   320 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
   321   NOT_PRODUCT(verify_ok_to_handle_reflists());
   322   if (UseCompressedOops) {
   323     return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
   324   } else {
   325     return enqueue_discovered_ref_helper<oop>(this, task_executor);
   326   }
   327 }
   329 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
   330                                                     HeapWord* pending_list_addr) {
   331   // Given a list of refs linked through the "discovered" field
   332   // (java.lang.ref.Reference.discovered) chain them through the
   333   // "next" field (java.lang.ref.Reference.next) and prepend
   334   // to the pending list.
   335   if (TraceReferenceGC && PrintGCDetails) {
   336     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
   337                            INTPTR_FORMAT, (address)refs_list.head());
   338   }
   339   oop obj = refs_list.head();
   340   // Walk down the list, copying the discovered field into
   341   // the next field and clearing it (except for the last
   342   // non-sentinel object which is treated specially to avoid
   343   // confusion with an active reference).
   344   while (obj != sentinel_ref()) {
   345     assert(obj->is_instanceRef(), "should be reference object");
   346     oop next = java_lang_ref_Reference::discovered(obj);
   347     if (TraceReferenceGC && PrintGCDetails) {
   348       gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
   349                              obj, next);
   350     }
   351     assert(java_lang_ref_Reference::next(obj) == NULL,
   352            "The reference should not be enqueued");
   353     if (next == sentinel_ref()) {  // obj is last
   354       // Swap refs_list into pending_list_addr and
   355       // set obj's next to what we read from pending_list_addr.
   356       oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
   357       // Need oop_check on pending_list_addr above;
   358       // see special oop-check code at the end of
   359       // enqueue_discovered_reflists() further below.
   360       if (old == NULL) {
   361         // obj should be made to point to itself, since
   362         // pending list was empty.
   363         java_lang_ref_Reference::set_next(obj, obj);
   364       } else {
   365         java_lang_ref_Reference::set_next(obj, old);
   366       }
   367     } else {
   368       java_lang_ref_Reference::set_next(obj, next);
   369     }
   370     java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
   371     obj = next;
   372   }
   373 }
   375 // Parallel enqueue task
   376 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
   377 public:
   378   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
   379                      DiscoveredList      discovered_refs[],
   380                      HeapWord*           pending_list_addr,
   381                      oop                 sentinel_ref,
   382                      int                 n_queues)
   383     : EnqueueTask(ref_processor, discovered_refs,
   384                   pending_list_addr, sentinel_ref, n_queues)
   385   { }
   387   virtual void work(unsigned int work_id) {
   388     assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
   389     // Simplest first cut: static partitioning.
   390     int index = work_id;
   391     for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
   392       _ref_processor.enqueue_discovered_reflist(
   393         _refs_lists[index], _pending_list_addr);
   394       _refs_lists[index].set_head(_sentinel_ref);
   395       _refs_lists[index].set_length(0);
   396     }
   397   }
   398 };
   400 // Enqueue references that are not made active again
   401 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
   402   AbstractRefProcTaskExecutor* task_executor) {
   403   if (_processing_is_mt && task_executor != NULL) {
   404     // Parallel code
   405     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
   406                            pending_list_addr, sentinel_ref(), _num_q);
   407     task_executor->execute(tsk);
   408   } else {
   409     // Serial code: enqueue the discovered lists one at a time in this thread
   410     for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
   411       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
   412       _discoveredSoftRefs[i].set_head(sentinel_ref());
   413       _discoveredSoftRefs[i].set_length(0);
   414     }
   415   }
   416 }
   418 // Iterator for the list of discovered references.
   419 class DiscoveredListIterator {
   420 public:
   421   inline DiscoveredListIterator(DiscoveredList&    refs_list,
   422                                 OopClosure*        keep_alive,
   423                                 BoolObjectClosure* is_alive);
   425   // End Of List.
   426   inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
   428   // Get oop to the Reference object.
   429   inline oop obj() const { return _ref; }
   431   // Get oop to the referent object.
   432   inline oop referent() const { return _referent; }
   434   // Returns true if referent is alive.
   435   inline bool is_referent_alive() const;
   437   // Loads data for the current reference.
   438   // The "allow_null_referent" argument tells us to allow for the possibility
   439   // of a NULL referent in the discovered Reference object. This typically
   440   // happens in the case of concurrent collectors that may have done the
   441   // discovery concurrently, or interleaved, with mutator execution.
   442   inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
   444   // Move to the next discovered reference.
   445   inline void next();
   447   // Remove the current reference from the list
   448   inline void remove();
   450   // Make the Reference object active again.
   451   inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
   453   // Make the referent alive.
   454   inline void make_referent_alive() {
   455     if (UseCompressedOops) {
   456       _keep_alive->do_oop((narrowOop*)_referent_addr);
   457     } else {
   458       _keep_alive->do_oop((oop*)_referent_addr);
   459     }
   460   }
   462   // Update the discovered field.
   463   inline void update_discovered() {
   464     // First _prev_next ref actually points into DiscoveredList (gross).
   465     if (UseCompressedOops) {
   466       _keep_alive->do_oop((narrowOop*)_prev_next);
   467     } else {
   468       _keep_alive->do_oop((oop*)_prev_next);
   469     }
   470   }
   472   // NULL out referent pointer.
   473   inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
   475   // Statistics
   476   NOT_PRODUCT(
   477   inline size_t processed() const { return _processed; }
   478   inline size_t removed() const   { return _removed; }
   479   )
   481   inline void move_to_next();
   483 private:
   484   DiscoveredList&    _refs_list;
   485   HeapWord*          _prev_next;
   486   oop                _ref;
   487   HeapWord*          _discovered_addr;
   488   oop                _next;
   489   HeapWord*          _referent_addr;
   490   oop                _referent;
   491   OopClosure*        _keep_alive;
   492   BoolObjectClosure* _is_alive;
   493   DEBUG_ONLY(
   494   oop                _first_seen; // cyclic linked list check
   495   )
   496   NOT_PRODUCT(
   497   size_t             _processed;
   498   size_t             _removed;
   499   )
   500 };
   502 inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
   503                                                       OopClosure*        keep_alive,
   504                                                       BoolObjectClosure* is_alive)
   505   : _refs_list(refs_list),
   506     _prev_next(refs_list.adr_head()),
   507     _ref(refs_list.head()),
   508 #ifdef ASSERT
   509     _first_seen(refs_list.head()),
   510 #endif
   511 #ifndef PRODUCT
   512     _processed(0),
   513     _removed(0),
   514 #endif
   515     _next(refs_list.head()),
   516     _keep_alive(keep_alive),
   517     _is_alive(is_alive)
   518 { }
   520 inline bool DiscoveredListIterator::is_referent_alive() const {
   521   return _is_alive->do_object_b(_referent);
   522 }
   524 inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   525   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   526   oop discovered = java_lang_ref_Reference::discovered(_ref);
   527   assert(_discovered_addr && discovered->is_oop_or_null(),
   528          "discovered field is bad");
   529   _next = discovered;
   530   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
   531   _referent = java_lang_ref_Reference::referent(_ref);
   532   assert(Universe::heap()->is_in_reserved_or_null(_referent),
   533          "Wrong oop found in java.lang.Reference object");
   534   assert(allow_null_referent ?
   535              _referent->is_oop_or_null()
   536            : _referent->is_oop(),
   537          "bad referent");
   538 }
   540 inline void DiscoveredListIterator::next() {
   541   _prev_next = _discovered_addr;
   542   move_to_next();
   543 }
   545 inline void DiscoveredListIterator::remove() {
   546   assert(_ref->is_oop(), "Dropping a bad reference");
   547   oop_store_raw(_discovered_addr, NULL);
   548   // First _prev_next ref actually points into DiscoveredList (gross).
   549   if (UseCompressedOops) {
   550     // Remove Reference object from list.
   551     oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
   552   } else {
   553     // Remove Reference object from list.
   554     oopDesc::store_heap_oop((oop*)_prev_next, _next);
   555   }
   556   NOT_PRODUCT(_removed++);
   557   _refs_list.dec_length(1);
   558 }
   560 inline void DiscoveredListIterator::move_to_next() {
   561   _ref = _next;
   562   assert(_ref != _first_seen, "cyclic ref_list found");
   563   NOT_PRODUCT(_processed++);
   564 }
   566 // NOTE: process_phase*() are largely similar, and at a high level
   567 // merely iterate over the extant list applying a predicate to
   568 // each of its elements and possibly removing that element from the
   569 // list and applying some further closures to that element.
   570 // We should consider the possibility of replacing these
   571 // process_phase*() methods by abstracting them into
   572 // a single general iterator invocation that receives appropriate
   573 // closures that accomplish this work.
   575 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
   576 // referents are not alive, but that should be kept alive for policy reasons.
   577 // Keep alive the transitive closure of all such referents.
   578 void
   579 ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
   580                                    ReferencePolicy*   policy,
   581                                    BoolObjectClosure* is_alive,
   582                                    OopClosure*        keep_alive,
   583                                    VoidClosure*       complete_gc) {
   584   assert(policy != NULL, "Must have a non-NULL policy");
   585   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   586   // Decide which softly reachable refs should be kept alive.
   587   while (iter.has_next()) {
   588     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
   589     bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
   590     if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
   591       if (TraceReferenceGC) {
   592         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
   593                                iter.obj(), iter.obj()->blueprint()->internal_name());
   594       }
   595       // Remove Reference object from list
   596       iter.remove();
   597       // Make the Reference object active again
   598       iter.make_active();
   599       // keep the referent around
   600       iter.make_referent_alive();
   601       iter.move_to_next();
   602     } else {
   603       iter.next();
   604     }
   605   }
   606   // Close the reachable set
   607   complete_gc->do_void();
   608   NOT_PRODUCT(
   609     if (PrintGCDetails && TraceReferenceGC) {
   610       gclog_or_tty->print(" Dropped %d dead Refs out of %d "
   611         "discovered Refs by policy ", iter.removed(), iter.processed());
   612     }
   613   )
   614 }
   616 // Traverse the list and remove any Refs that are not active, or
   617 // whose referents are either alive or NULL.
   618 void
   619 ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
   620                              BoolObjectClosure* is_alive,
   621                              OopClosure*        keep_alive) {
   622   assert(discovery_is_atomic(), "Error");
   623   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   624   while (iter.has_next()) {
   625     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
   626     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
   627     assert(next == NULL, "Should not discover inactive Reference");
   628     if (iter.is_referent_alive()) {
   629       if (TraceReferenceGC) {
   630         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
   631                                iter.obj(), iter.obj()->blueprint()->internal_name());
   632       }
   633       // The referent is reachable after all.
   634       // Remove Reference object from list.
   635       iter.remove();
   636       // Update the referent pointer as necessary: Note that this
   637       // should not entail any recursive marking because the
   638       // referent must already have been traversed.
   639       iter.make_referent_alive();
   640       iter.move_to_next();
   641     } else {
   642       iter.next();
   643     }
   644   }
   645   NOT_PRODUCT(
   646     if (PrintGCDetails && TraceReferenceGC) {
   647       gclog_or_tty->print(" Dropped %d active Refs out of %d "
   648         "Refs in discovered list ", iter.removed(), iter.processed());
   649     }
   650   )
   651 }
   653 void
   654 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
   655                                                   BoolObjectClosure* is_alive,
   656                                                   OopClosure*        keep_alive,
   657                                                   VoidClosure*       complete_gc) {
   658   assert(!discovery_is_atomic(), "Error");
   659   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   660   while (iter.has_next()) {
   661     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
   662     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
   663     oop next = java_lang_ref_Reference::next(iter.obj());
   664     if ((iter.referent() == NULL || iter.is_referent_alive() ||
   665          next != NULL)) {
   666       assert(next->is_oop_or_null(), "bad next field");
   667       // Remove Reference object from list
   668       iter.remove();
   669       // Trace the cohorts
   670       iter.make_referent_alive();
   671       if (UseCompressedOops) {
   672         keep_alive->do_oop((narrowOop*)next_addr);
   673       } else {
   674         keep_alive->do_oop((oop*)next_addr);
   675       }
   676       iter.move_to_next();
   677     } else {
   678       iter.next();
   679     }
   680   }
   681   // Now close the newly reachable set
   682   complete_gc->do_void();
   683   NOT_PRODUCT(
   684     if (PrintGCDetails && TraceReferenceGC) {
   685       gclog_or_tty->print(" Dropped %d active Refs out of %d "
   686         "Refs in discovered list ", iter.removed(), iter.processed());
   687     }
   688   )
   689 }
   691 // Traverse the list and process the referents, by either
   692 // clearing them or keeping them (and their reachable
   693 // closure) alive.
   694 void
   695 ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
   696                                    bool               clear_referent,
   697                                    BoolObjectClosure* is_alive,
   698                                    OopClosure*        keep_alive,
   699                                    VoidClosure*       complete_gc) {
   700   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   701   while (iter.has_next()) {
   702     iter.update_discovered();
   703     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
   704     if (clear_referent) {
   705       // NULL out referent pointer
   706       iter.clear_referent();
   707     } else {
   708       // keep the referent around
   709       iter.make_referent_alive();
   710     }
   711     if (TraceReferenceGC) {
   712       gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
   713                              clear_referent ? "cleared " : "",
   714                              iter.obj(), iter.obj()->blueprint()->internal_name());
   715     }
   716     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
   717     // If discovery is concurrent, we may have objects with null referents,
   718     // being those that were concurrently cleared after they were discovered
   719     // (and not subsequently precleaned).
   720     assert(   (discovery_is_atomic() && iter.referent()->is_oop())
   721            || (!discovery_is_atomic() && iter.referent()->is_oop_or_null(UseConcMarkSweepGC)),
   722            "Adding a bad referent");
   723     iter.next();
   724   }
   725   // Remember to keep sentinel pointer around
   726   iter.update_discovered();
   727   // Close the reachable set
   728   complete_gc->do_void();
   729 }
   731 void
   732 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
   733   oop obj = refs_list.head();
   734   while (obj != sentinel_ref()) {
   735     oop discovered = java_lang_ref_Reference::discovered(obj);
   736     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
   737     obj = discovered;
   738   }
   739   refs_list.set_head(sentinel_ref());
   740   refs_list.set_length(0);
   741 }
   743 void ReferenceProcessor::abandon_partial_discovery() {
   744   // loop over the lists
   745   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
   746     if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
   747       gclog_or_tty->print_cr(
   748         "\nAbandoning %s discovered list",
   749         list_name(i));
   750     }
   751     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   752   }
   753 }
   755 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
   756 public:
   757   RefProcPhase1Task(ReferenceProcessor& ref_processor,
   758                     DiscoveredList      refs_lists[],
   759                     ReferencePolicy*    policy,
   760                     bool                marks_oops_alive)
   761     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
   762       _policy(policy)
   763   { }
   764   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
   765                     OopClosure& keep_alive,
   766                     VoidClosure& complete_gc)
   767   {
   768     _ref_processor.process_phase1(_refs_lists[i], _policy,
   769                                   &is_alive, &keep_alive, &complete_gc);
   770   }
   771 private:
   772   ReferencePolicy* _policy;
   773 };
   775 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
   776 public:
   777   RefProcPhase2Task(ReferenceProcessor& ref_processor,
   778                     DiscoveredList      refs_lists[],
   779                     bool                marks_oops_alive)
   780     : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
   781   { }
   782   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
   783                     OopClosure& keep_alive,
   784                     VoidClosure& complete_gc)
   785   {
   786     _ref_processor.process_phase2(_refs_lists[i],
   787                                   &is_alive, &keep_alive, &complete_gc);
   788   }
   789 };
   791 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
   792 public:
   793   RefProcPhase3Task(ReferenceProcessor& ref_processor,
   794                     DiscoveredList      refs_lists[],
   795                     bool                clear_referent,
   796                     bool                marks_oops_alive)
   797     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
   798       _clear_referent(clear_referent)
   799   { }
   800   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
   801                     OopClosure& keep_alive,
   802                     VoidClosure& complete_gc)
   803   {
   804     _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
   805                                   &is_alive, &keep_alive, &complete_gc);
   806   }
   807 private:
   808   bool _clear_referent;
   809 };
   811 // Balance the reference lists so that each of the _num_q queues holds roughly the same number of references.
   812 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
   813 {
   814   // calculate total length
   815   size_t total_refs = 0;
   816   for (int i = 0; i < _num_q; ++i) {
   817     total_refs += ref_lists[i].length();
   818   }
   819   size_t avg_refs = total_refs / _num_q + 1;
   820   int to_idx = 0;
   821   for (int from_idx = 0; from_idx < _num_q; from_idx++) {
   822     while (ref_lists[from_idx].length() > avg_refs) {
   823       assert(to_idx < _num_q, "Sanity Check!");
   824       if (ref_lists[to_idx].length() < avg_refs) {
   825         // move superfluous refs
   826         size_t refs_to_move =
   827           MIN2(ref_lists[from_idx].length() - avg_refs,
   828                avg_refs - ref_lists[to_idx].length());
   829         oop move_head = ref_lists[from_idx].head();
   830         oop move_tail = move_head;
   831         oop new_head  = move_head;
   832         // find an element to split the list on
   833         for (size_t j = 0; j < refs_to_move; ++j) {
   834           move_tail = new_head;
   835           new_head = java_lang_ref_Reference::discovered(new_head);
   836         }
   837         java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
   838         ref_lists[to_idx].set_head(move_head);
   839         ref_lists[to_idx].inc_length(refs_to_move);
   840         ref_lists[from_idx].set_head(new_head);
   841         ref_lists[from_idx].dec_length(refs_to_move);
   842       } else {
   843         ++to_idx;
   844       }
   845     }
   846   }
   847 }
   849 void
   850 ReferenceProcessor::process_discovered_reflist(
   851   DiscoveredList               refs_lists[],
   852   ReferencePolicy*             policy,
   853   bool                         clear_referent,
   854   BoolObjectClosure*           is_alive,
   855   OopClosure*                  keep_alive,
   856   VoidClosure*                 complete_gc,
   857   AbstractRefProcTaskExecutor* task_executor)
   858 {
   859   bool mt = task_executor != NULL && _processing_is_mt;
   860   if (mt && ParallelRefProcBalancingEnabled) {
   861     balance_queues(refs_lists);
   862   }
   863   if (PrintReferenceGC && PrintGCDetails) {
   864     size_t total = 0;
   865     for (int i = 0; i < _num_q; ++i) {
   866       total += refs_lists[i].length();
   867     }
   868     gclog_or_tty->print(", %u refs", total);
   869   }
   871   // Phase 1 (soft refs only):
   872   // . Traverse the list and remove any SoftReferences whose
   873   //   referents are not alive, but that should be kept alive for
   874   //   policy reasons. Keep alive the transitive closure of all
   875   //   such referents.
   876   if (policy != NULL) {
   877     if (mt) {
   878       RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
   879       task_executor->execute(phase1);
   880     } else {
   881       for (int i = 0; i < _num_q; i++) {
   882         process_phase1(refs_lists[i], policy,
   883                        is_alive, keep_alive, complete_gc);
   884       }
   885     }
   886   } else { // policy == NULL
   887     assert(refs_lists != _discoveredSoftRefs,
   888            "Policy must be specified for soft references.");
   889   }
   891   // Phase 2:
   892   // . Traverse the list and remove any refs whose referents are alive.
   893   if (mt) {
   894     RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
   895     task_executor->execute(phase2);
   896   } else {
   897     for (int i = 0; i < _num_q; i++) {
   898       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
   899     }
   900   }
   902   // Phase 3:
   903   // . Traverse the list and process referents as appropriate.
   904   if (mt) {
   905     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
   906     task_executor->execute(phase3);
   907   } else {
   908     for (int i = 0; i < _num_q; i++) {
   909       process_phase3(refs_lists[i], clear_referent,
   910                      is_alive, keep_alive, complete_gc);
   911     }
   912   }
   913 }
   915 void ReferenceProcessor::clean_up_discovered_references() {
   916   // loop over the lists
   917   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
   918     if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
   919       gclog_or_tty->print_cr(
   920         "\nScrubbing %s discovered list of Null referents",
   921         list_name(i));
   922     }
   923     clean_up_discovered_reflist(_discoveredSoftRefs[i]);
   924   }
   925 }
   927 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   928   assert(!discovery_is_atomic(), "Else why call this method?");
   929   DiscoveredListIterator iter(refs_list, NULL, NULL);
   930   while (iter.has_next()) {
   931     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
   932     oop next = java_lang_ref_Reference::next(iter.obj());
   933     assert(next->is_oop_or_null(), "bad next field");
   934     // If referent has been cleared or Reference is not active,
   935     // drop it.
   936     if (iter.referent() == NULL || next != NULL) {
   937       debug_only(
   938         if (PrintGCDetails && TraceReferenceGC) {
   939           gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
   940             INTPTR_FORMAT " with next field: " INTPTR_FORMAT
   941             " and referent: " INTPTR_FORMAT,
   942             iter.obj(), next, iter.referent());
   943         }
   944       )
   945       // Remove Reference object from list
   946       iter.remove();
   947       iter.move_to_next();
   948     } else {
   949       iter.next();
   950     }
   951   }
   952   NOT_PRODUCT(
   953     if (PrintGCDetails && TraceReferenceGC) {
   954       gclog_or_tty->print(
   955         " Removed %d Refs with NULL referents out of %d discovered Refs",
   956         iter.removed(), iter.processed());
   957     }
   958   )
   959 }
   961 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
   962   int id = 0;
   963   // Determine the queue index to use for this object.
   964   if (_discovery_is_mt) {
   965     // During a multi-threaded discovery phase,
   966     // each thread saves to its "own" list.
   967     Thread* thr = Thread::current();
   968     assert(thr->is_GC_task_thread(),
   969            "Dubious cast from Thread* to WorkerThread*?");
   970     id = ((WorkerThread*)thr)->id();
   971   } else {
   972     // single-threaded discovery, we save in round-robin
   973     // fashion to each of the lists.
   974     if (_processing_is_mt) {
   975       id = next_id();
   976     }
   977   }
   978   assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");
   980   // Get the discovered queue to which we will add
   981   DiscoveredList* list = NULL;
   982   switch (rt) {
   983     case REF_OTHER:
   984       // Unknown reference type, no special treatment
   985       break;
   986     case REF_SOFT:
   987       list = &_discoveredSoftRefs[id];
   988       break;
   989     case REF_WEAK:
   990       list = &_discoveredWeakRefs[id];
   991       break;
   992     case REF_FINAL:
   993       list = &_discoveredFinalRefs[id];
   994       break;
   995     case REF_PHANTOM:
   996       list = &_discoveredPhantomRefs[id];
   997       break;
   998     case REF_NONE:
   999       // we should not reach here if we are an instanceRefKlass
  1000     default:
  1001       ShouldNotReachHere();
  1002   }
  1003   return list;
  1004 }
  1006 inline void
  1007 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
  1008                                               oop             obj,
  1009                                               HeapWord*       discovered_addr) {
  1010   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  1011   // First we must make sure this object is only enqueued once. CAS in a non null
  1012   // discovered_addr.
  1013   oop current_head = refs_list.head();
  1015   // Note: In the case of G1, this pre-barrier is strictly
  1016   // not necessary because the only case we are interested in
  1017   // here is when *discovered_addr is NULL, so this will expand to
  1018   // nothing. As a result, I am just manually eliding this out for G1.
  1019   if (_discovered_list_needs_barrier && !UseG1GC) {
  1020     _bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
  1021   }
  1022   oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
  1023                                                     NULL);
  1024   if (retest == NULL) {
  1025     // This thread just won the right to enqueue the object.
  1026     // We have separate lists for enqueueing so no synchronization
  1027     // is necessary.
  1028     refs_list.set_head(obj);
  1029     refs_list.inc_length(1);
  1030     if (_discovered_list_needs_barrier) {
  1031       _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
  1032     }
  1034   } else {
  1035     // If retest was non NULL, another thread beat us to it:
  1036     // The reference has already been discovered...
  1037     if (TraceReferenceGC) {
  1038       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
  1039                              obj, obj->blueprint()->internal_name());
  1040     }
  1041   }
  1042 }
  1044 // We mention two of several possible choices here:
  1045 // #0: if the reference object is not in the "originating generation"
  1046 //     (or part of the heap being collected, indicated by our "span")
  1047 //     we don't treat it specially (i.e. we scan it as we would
  1048 //     a normal oop, treating its references as strong references).
  1049 //     This means that references can't be enqueued unless their
  1050 //     referent is also in the same span. This is the simplest,
  1051 //     most "local" and most conservative approach, albeit one
  1052 //     that may cause weak references to be enqueued least promptly.
  1053 //     We call this choice the "ReferenceBasedDiscovery" policy.
  1054 // #1: the reference object may be in any generation (span), but if
  1055 //     the referent is in the generation (span) being currently collected
  1056 //     then we can discover the reference object, provided
  1057 //     the object has not already been discovered by
  1058 //     a different concurrently running collector (as may be the
  1059 //     case, for instance, if the reference object is in CMS and
  1060 //     the referent in DefNewGeneration), and provided the processing
  1061 //     of this reference object by the current collector will
  1062 //     appear atomic to every other collector in the system.
  1063 //     (Thus, for instance, a concurrent collector may not
  1064 //     discover references in other generations even if the
  1065 //     referent is in its own generation). This policy may,
  1066 //     in certain cases, enqueue references somewhat sooner than
  1067 //     might Policy #0 above, but at marginally increased cost
  1068 //     and complexity in processing these references.
  1069 //     We call this choice the "ReferentBasedDiscovery" policy.
  1070 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  1071   // We enqueue references only if we are discovering refs
  1072   // (rather than processing discovered refs).
  1073   if (!_discovering_refs || !RegisterReferences) {
  1074     return false;
  1075   }
  1076   // We only enqueue active references.
  1077   oop next = java_lang_ref_Reference::next(obj);
  1078   if (next != NULL) {
  1079     return false;
  1080   }
  1082   HeapWord* obj_addr = (HeapWord*)obj;
  1083   if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
  1084       !_span.contains(obj_addr)) {
  1085     // Reference is not in the originating generation;
  1086     // don't treat it specially (i.e. we want to scan it as a normal
  1087     // object with strong references).
  1088     return false;
  1089   }
  1091   // We only enqueue references whose referents are not (yet) strongly
  1092   // reachable.
  1093   if (is_alive_non_header() != NULL) {
  1094     oop referent = java_lang_ref_Reference::referent(obj);
  1095     // We'd like to assert the following:
  1096     // assert(referent != NULL, "Refs with null referents already filtered");
  1097     // However, since this code may be executed concurrently with
  1098     // mutators, which can clear() the referent, it is not
  1099     // guaranteed that the referent is non-NULL.
  1100     if (is_alive_non_header()->do_object_b(referent)) {
  1101       return false;  // referent is reachable
  1102     }
  1103   }
  1105   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  1106   const oop  discovered = java_lang_ref_Reference::discovered(obj);
  1107   assert(discovered->is_oop_or_null(), "bad discovered field");
  1108   if (discovered != NULL) {
  1109     // The reference has already been discovered...
  1110     if (TraceReferenceGC) {
  1111       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
  1112                              obj, obj->blueprint()->internal_name());
  1113     }
  1114     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
  1115       // assumes that an object is not processed twice;
  1116       // if it's been already discovered it must be on another
  1117       // generation's discovered list; so we won't discover it.
  1118       return false;
  1119     } else {
  1120       assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
  1121              "Unrecognized policy");
  1122       // Check assumption that an object is not potentially
  1123       // discovered twice except by concurrent collectors that potentially
  1124       // trace the same Reference object twice.
  1125       assert(UseConcMarkSweepGC,
  1126              "Only possible with an incremental-update concurrent collector");
  1127       return true;
  1128     }
  1129   }
  1131   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
  1132     oop referent = java_lang_ref_Reference::referent(obj);
  1133     assert(referent->is_oop(), "bad referent");
  1134     // enqueue if and only if either:
  1135     // reference is in our span or
  1136     // we are an atomic collector and referent is in our span
  1137     if (_span.contains(obj_addr) ||
  1138         (discovery_is_atomic() && _span.contains(referent))) {
  1139       // should_enqueue = true;
  1140     } else {
  1141       return false;
  1142     }
  1143   } else {
  1144     assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
  1145            _span.contains(obj_addr), "code inconsistency");
  1146   }
  1148   // Get the right type of discovered queue head.
  1149   DiscoveredList* list = get_discovered_list(rt);
  1150   if (list == NULL) {
  1151     return false;   // nothing special needs to be done
  1152   }
  1154   if (_discovery_is_mt) {
  1155     add_to_discovered_list_mt(*list, obj, discovered_addr);
  1156   } else {
  1157     // If "_discovered_list_needs_barrier", we do write barriers when
  1158     // updating the discovered reference list.  Otherwise, we do a raw store
  1159     // here: the field will be visited later when processing the discovered
  1160     // references.
  1161     oop current_head = list->head();
  1162     // As in the case further above, since we are over-writing a NULL
  1163     // pre-value, we can safely elide the pre-barrier here for the case of G1.
  1164     assert(discovered == NULL, "control point invariant");
  1165     if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
  1166       _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
  1167     }
  1168     oop_store_raw(discovered_addr, current_head);
  1169     if (_discovered_list_needs_barrier) {
  1170       _bs->write_ref_field((oop*)discovered_addr, current_head);
  1171     }
  1172     list->set_head(obj);
  1173     list->inc_length(1);
  1174   }
  1176   // In the MT discovery case, it is currently possible to see
  1177   // the following message multiple times if several threads
  1178   // discover a reference about the same time. Only one will
  1179 // however have actually added it to the discovered queue.
  1180   // One could let add_to_discovered_list_mt() return an
  1181   // indication for success in queueing (by 1 thread) or
  1182   // failure (by all other threads), but I decided the extra
  1183   // code was not worth the effort for something that is
  1184   // only used for debugging support.
  1185   if (TraceReferenceGC) {
  1186     oop referent = java_lang_ref_Reference::referent(obj);
  1187     if (PrintGCDetails) {
  1188       gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
  1189                              obj, obj->blueprint()->internal_name());
  1190     }
  1191     assert(referent->is_oop(), "Enqueued a bad referent");
  1192   }
  1193   assert(obj->is_oop(), "Enqueued a bad reference");
  1194   return true;
  1195 }
  1197 // Preclean the discovered references by removing those
  1198 // whose referents are alive, and by marking from those that
  1199 // are not active. These lists can be handled here
  1200 // in any order and, indeed, concurrently.
  1201 void ReferenceProcessor::preclean_discovered_references(
  1202   BoolObjectClosure* is_alive,
  1203   OopClosure* keep_alive,
  1204   VoidClosure* complete_gc,
  1205   YieldClosure* yield) {
  1207   NOT_PRODUCT(verify_ok_to_handle_reflists());
  1209   // Soft references
  1210   {
  1211     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
  1212               false, gclog_or_tty);
  1213     for (int i = 0; i < _num_q; i++) {
  1214       if (yield->should_return()) {
  1215         return;
  1216       }
  1217       preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
  1218                                   keep_alive, complete_gc, yield);
  1219     }
  1220   }
  1222   // Weak references
  1223   {
  1224     TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
  1225               false, gclog_or_tty);
  1226     for (int i = 0; i < _num_q; i++) {
  1227       if (yield->should_return()) {
  1228         return;
  1229       }
  1230       preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
  1231                                   keep_alive, complete_gc, yield);
  1232     }
  1233   }
  1235   // Final references
  1236   {
  1237     TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
  1238               false, gclog_or_tty);
  1239     for (int i = 0; i < _num_q; i++) {
  1240       if (yield->should_return()) {
  1241         return;
  1242       }
  1243       preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
  1244                                   keep_alive, complete_gc, yield);
  1245     }
  1246   }
  1248   // Phantom references
  1249   {
  1250     TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
  1251               false, gclog_or_tty);
  1252     for (int i = 0; i < _num_q; i++) {
  1253       if (yield->should_return()) {
  1254         return;
  1255       }
  1256       preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
  1257                                   keep_alive, complete_gc, yield);
  1258     }
  1259   }
  1260 }
  1262 // Walk the given discovered ref list, and remove all reference objects
  1263 // whose referents are still alive, whose referents are NULL or which
  1264 // are not active (have a non-NULL next field). NOTE: When we are
  1265 // thus precleaning the ref lists (which happens single-threaded today),
  1266 // we do not disable refs discovery to honour the correct semantics of
  1267 // java.lang.Reference. As a result, we need to be careful below
  1268 // that ref removal steps interleave safely with ref discovery steps
  1269 // (in this thread).
  1270 void
  1271 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
  1272                                                 BoolObjectClosure* is_alive,
  1273                                                 OopClosure*        keep_alive,
  1274                                                 VoidClosure*       complete_gc,
  1275                                                 YieldClosure*      yield) {
  1276   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  1277   while (iter.has_next()) {
  1278     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
  1279     oop obj = iter.obj();
  1280     oop next = java_lang_ref_Reference::next(obj);
  1281     if (iter.referent() == NULL || iter.is_referent_alive() ||
  1282         next != NULL) {
  1283       // The referent has been cleared, or is alive, or the Reference is not
  1284       // active; we need to trace and mark its cohort.
  1285       if (TraceReferenceGC) {
  1286         gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
  1287                                iter.obj(), iter.obj()->blueprint()->internal_name());
  1288       }
  1289       // Remove Reference object from list
  1290       iter.remove();
  1291       // Keep alive its cohort.
  1292       iter.make_referent_alive();
  1293       if (UseCompressedOops) {
  1294         narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
  1295         keep_alive->do_oop(next_addr);
  1296       } else {
  1297         oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
  1298         keep_alive->do_oop(next_addr);
  1299       }
  1300       iter.move_to_next();
  1301     } else {
  1302       iter.next();
  1303     }
  1304   }
  1305   // Close the reachable set
  1306   complete_gc->do_void();
  1308   NOT_PRODUCT(
  1309     if (PrintGCDetails && PrintReferenceGC) {
  1310       gclog_or_tty->print(" Dropped %d Refs out of %d "
  1311         "Refs in discovered list ", iter.removed(), iter.processed());
  1312     }
  1313   )
  1314 }
  1316 const char* ReferenceProcessor::list_name(int i) {
  1317    assert(i >= 0 && i <= _num_q * subclasses_of_ref, "Out of bounds index");
  1318    int j = i / _num_q;
  1319    switch (j) {
  1320      case 0: return "SoftRef";
  1321      case 1: return "WeakRef";
  1322      case 2: return "FinalRef";
  1323      case 3: return "PhantomRef";
  1324    }
  1325    ShouldNotReachHere();
  1326    return NULL;
  1327 }
  1329 #ifndef PRODUCT
  1330 void ReferenceProcessor::verify_ok_to_handle_reflists() {
  1331   // empty for now
  1332 }
  1333 #endif
  1335 void ReferenceProcessor::verify() {
  1336   guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
  1337 }
  1339 #ifndef PRODUCT
  1340 void ReferenceProcessor::clear_discovered_references() {
  1341   guarantee(!_discovering_refs, "Discovering refs?");
  1342   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
  1343     oop obj = _discoveredSoftRefs[i].head();
  1344     while (obj != sentinel_ref()) {
  1345       oop next = java_lang_ref_Reference::discovered(obj);
  1346       java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
  1347       obj = next;
  1348     }
  1349     _discoveredSoftRefs[i].set_head(sentinel_ref());
  1350     _discoveredSoftRefs[i].set_length(0);
  1351   }
  1352 }
  1353 #endif // PRODUCT
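
As a side note on the balancing step used by process_discovered_reflist() above, the following standalone sketch models what balance_queues() does, using plain STL containers rather than DiscoveredList (illustrative only, none of it HotSpot code): compute the average list length, then move the surplus from lists longer than the average onto lists shorter than it, advancing the destination index once it fills up. The real code splices "discovered" chains in place instead of copying elements.

// Standalone model of the balancing loop in balance_queues(); illustrative only.
#include <cstdio>
#include <deque>
#include <vector>

static void balance(std::vector<std::deque<int> >& lists) {
  size_t total = 0;
  for (size_t i = 0; i < lists.size(); i++) total += lists[i].size();
  size_t avg = total / lists.size() + 1;       // same target as avg_refs in balance_queues()
  size_t to = 0;
  for (size_t from = 0; from < lists.size(); from++) {
    while (lists[from].size() > avg) {         // (the VM code asserts that 'to' stays in range here)
      if (lists[to].size() < avg) {
        // Move the surplus, bounded by how much room the destination has left.
        size_t n = lists[from].size() - avg;
        size_t room = avg - lists[to].size();
        if (room < n) n = room;
        for (size_t j = 0; j < n; j++) {
          lists[to].push_back(lists[from].front());
          lists[from].pop_front();
        }
      } else {
        ++to;                                  // destination is full; try the next queue
      }
    }
  }
}

int main() {
  std::vector<std::deque<int> > lists(3);
  for (int i = 0; i < 9; i++) lists[0].push_back(i);   // one long list, two empty ones
  balance(lists);
  for (size_t i = 0; i < lists.size(); i++)
    printf("list %zu has %zu refs\n", i, lists[i].size());
  return 0;
}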
