1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/memory/referenceProcessor.cpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,1395 @@ 1.4 +/* 1.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
1.25 + * 1.26 + */ 1.27 + 1.28 +#include "precompiled.hpp" 1.29 +#include "classfile/javaClasses.hpp" 1.30 +#include "classfile/systemDictionary.hpp" 1.31 +#include "gc_implementation/shared/gcTimer.hpp" 1.32 +#include "gc_implementation/shared/gcTraceTime.hpp" 1.33 +#include "gc_interface/collectedHeap.hpp" 1.34 +#include "gc_interface/collectedHeap.inline.hpp" 1.35 +#include "memory/referencePolicy.hpp" 1.36 +#include "memory/referenceProcessor.hpp" 1.37 +#include "oops/oop.inline.hpp" 1.38 +#include "runtime/java.hpp" 1.39 +#include "runtime/jniHandles.hpp" 1.40 + 1.41 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 1.42 + 1.43 +ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 1.44 +ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 1.45 +bool ReferenceProcessor::_pending_list_uses_discovered_field = false; 1.46 +jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 1.47 + 1.48 +void referenceProcessor_init() { 1.49 + ReferenceProcessor::init_statics(); 1.50 +} 1.51 + 1.52 +void ReferenceProcessor::init_statics() { 1.53 + // We need a monotonically non-decreasing time in ms but 1.54 + // os::javaTimeMillis() does not guarantee monotonicity. 1.55 + jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 1.56 + 1.57 + // Initialize the soft ref timestamp clock. 
1.58 + _soft_ref_timestamp_clock = now; 1.59 + // Also update the soft ref clock in j.l.r.SoftReference 1.60 + java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 1.61 + 1.62 + _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 1.63 + _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy()) 1.64 + NOT_COMPILER2(LRUCurrentHeapPolicy()); 1.65 + if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 1.66 + vm_exit_during_initialization("Could not allocate reference policy object"); 1.67 + } 1.68 + guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 1.69 + RefDiscoveryPolicy == ReferentBasedDiscovery, 1.70 + "Unrecongnized RefDiscoveryPolicy"); 1.71 + _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field(); 1.72 +} 1.73 + 1.74 +void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) { 1.75 +#ifdef ASSERT 1.76 + // Verify that we're not currently discovering refs 1.77 + assert(!verify_disabled || !_discovering_refs, "nested call?"); 1.78 + 1.79 + if (check_no_refs) { 1.80 + // Verify that the discovered lists are empty 1.81 + verify_no_references_recorded(); 1.82 + } 1.83 +#endif // ASSERT 1.84 + 1.85 + // Someone could have modified the value of the static 1.86 + // field in the j.l.r.SoftReference class that holds the 1.87 + // soft reference timestamp clock using reflection or 1.88 + // Unsafe between GCs. Unconditionally update the static 1.89 + // field in ReferenceProcessor here so that we use the new 1.90 + // value during reference discovery. 
1.91 + 1.92 + _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 1.93 + _discovering_refs = true; 1.94 +} 1.95 + 1.96 +ReferenceProcessor::ReferenceProcessor(MemRegion span, 1.97 + bool mt_processing, 1.98 + uint mt_processing_degree, 1.99 + bool mt_discovery, 1.100 + uint mt_discovery_degree, 1.101 + bool atomic_discovery, 1.102 + BoolObjectClosure* is_alive_non_header) : 1.103 + _discovering_refs(false), 1.104 + _enqueuing_is_done(false), 1.105 + _is_alive_non_header(is_alive_non_header), 1.106 + _processing_is_mt(mt_processing), 1.107 + _next_id(0) 1.108 +{ 1.109 + _span = span; 1.110 + _discovery_is_atomic = atomic_discovery; 1.111 + _discovery_is_mt = mt_discovery; 1.112 + _num_q = MAX2(1U, mt_processing_degree); 1.113 + _max_num_q = MAX2(_num_q, mt_discovery_degree); 1.114 + _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 1.115 + _max_num_q * number_of_subclasses_of_ref(), mtGC); 1.116 + 1.117 + if (_discovered_refs == NULL) { 1.118 + vm_exit_during_initialization("Could not allocated RefProc Array"); 1.119 + } 1.120 + _discoveredSoftRefs = &_discovered_refs[0]; 1.121 + _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q]; 1.122 + _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q]; 1.123 + _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; 1.124 + 1.125 + // Initialize all entries to NULL 1.126 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.127 + _discovered_refs[i].set_head(NULL); 1.128 + _discovered_refs[i].set_length(0); 1.129 + } 1.130 + 1.131 + setup_policy(false /* default soft ref policy */); 1.132 +} 1.133 + 1.134 +#ifndef PRODUCT 1.135 +void ReferenceProcessor::verify_no_references_recorded() { 1.136 + guarantee(!_discovering_refs, "Discovering refs?"); 1.137 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.138 + guarantee(_discovered_refs[i].is_empty(), 1.139 + "Found non-empty discovered list"); 1.140 + } 1.141 +} 1.142 +#endif 1.143 + 1.144 +void 
ReferenceProcessor::weak_oops_do(OopClosure* f) { 1.145 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.146 + if (UseCompressedOops) { 1.147 + f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 1.148 + } else { 1.149 + f->do_oop((oop*)_discovered_refs[i].adr_head()); 1.150 + } 1.151 + } 1.152 +} 1.153 + 1.154 +void ReferenceProcessor::update_soft_ref_master_clock() { 1.155 + // Update (advance) the soft ref master clock field. This must be done 1.156 + // after processing the soft ref list. 1.157 + 1.158 + // We need a monotonically non-decreasing time in ms but 1.159 + // os::javaTimeMillis() does not guarantee monotonicity. 1.160 + jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 1.161 + jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 1.162 + assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 1.163 + 1.164 + NOT_PRODUCT( 1.165 + if (now < _soft_ref_timestamp_clock) { 1.166 + warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, 1.167 + _soft_ref_timestamp_clock, now); 1.168 + } 1.169 + ) 1.170 + // The values of now and _soft_ref_timestamp_clock are set using 1.171 + // javaTimeNanos(), which is guaranteed to be monotonically 1.172 + // non-decreasing provided the underlying platform provides such 1.173 + // a time source (and it is bug free). 1.174 + // In product mode, however, protect ourselves from non-monotonicity. 1.175 + if (now > _soft_ref_timestamp_clock) { 1.176 + _soft_ref_timestamp_clock = now; 1.177 + java_lang_ref_SoftReference::set_clock(now); 1.178 + } 1.179 + // Else leave clock stalled at its old value until time progresses 1.180 + // past clock value. 
1.181 +} 1.182 + 1.183 +size_t ReferenceProcessor::total_count(DiscoveredList lists[]) { 1.184 + size_t total = 0; 1.185 + for (uint i = 0; i < _max_num_q; ++i) { 1.186 + total += lists[i].length(); 1.187 + } 1.188 + return total; 1.189 +} 1.190 + 1.191 +ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 1.192 + BoolObjectClosure* is_alive, 1.193 + OopClosure* keep_alive, 1.194 + VoidClosure* complete_gc, 1.195 + AbstractRefProcTaskExecutor* task_executor, 1.196 + GCTimer* gc_timer) { 1.197 + NOT_PRODUCT(verify_ok_to_handle_reflists()); 1.198 + 1.199 + assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 1.200 + // Stop treating discovered references specially. 1.201 + disable_discovery(); 1.202 + 1.203 + // If discovery was concurrent, someone could have modified 1.204 + // the value of the static field in the j.l.r.SoftReference 1.205 + // class that holds the soft reference timestamp clock using 1.206 + // reflection or Unsafe between when discovery was enabled and 1.207 + // now. Unconditionally update the static field in ReferenceProcessor 1.208 + // here so that we use the new value during processing of the 1.209 + // discovered soft refs. 
1.210 + 1.211 + _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 1.212 + 1.213 + bool trace_time = PrintGCDetails && PrintReferenceGC; 1.214 + 1.215 + // Soft references 1.216 + size_t soft_count = 0; 1.217 + { 1.218 + GCTraceTime tt("SoftReference", trace_time, false, gc_timer); 1.219 + soft_count = 1.220 + process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 1.221 + is_alive, keep_alive, complete_gc, task_executor); 1.222 + } 1.223 + 1.224 + update_soft_ref_master_clock(); 1.225 + 1.226 + // Weak references 1.227 + size_t weak_count = 0; 1.228 + { 1.229 + GCTraceTime tt("WeakReference", trace_time, false, gc_timer); 1.230 + weak_count = 1.231 + process_discovered_reflist(_discoveredWeakRefs, NULL, true, 1.232 + is_alive, keep_alive, complete_gc, task_executor); 1.233 + } 1.234 + 1.235 + // Final references 1.236 + size_t final_count = 0; 1.237 + { 1.238 + GCTraceTime tt("FinalReference", trace_time, false, gc_timer); 1.239 + final_count = 1.240 + process_discovered_reflist(_discoveredFinalRefs, NULL, false, 1.241 + is_alive, keep_alive, complete_gc, task_executor); 1.242 + } 1.243 + 1.244 + // Phantom references 1.245 + size_t phantom_count = 0; 1.246 + { 1.247 + GCTraceTime tt("PhantomReference", trace_time, false, gc_timer); 1.248 + phantom_count = 1.249 + process_discovered_reflist(_discoveredPhantomRefs, NULL, false, 1.250 + is_alive, keep_alive, complete_gc, task_executor); 1.251 + } 1.252 + 1.253 + // Weak global JNI references. It would make more sense (semantically) to 1.254 + // traverse these simultaneously with the regular weak references above, but 1.255 + // that is not how the JDK1.2 specification is. See #4126360. Native code can 1.256 + // thus use JNI weak references to circumvent the phantom references and 1.257 + // resurrect a "post-mortem" object. 
1.258 + { 1.259 + GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer); 1.260 + if (task_executor != NULL) { 1.261 + task_executor->set_single_threaded_mode(); 1.262 + } 1.263 + process_phaseJNI(is_alive, keep_alive, complete_gc); 1.264 + } 1.265 + 1.266 + return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count); 1.267 +} 1.268 + 1.269 +#ifndef PRODUCT 1.270 +// Calculate the number of jni handles. 1.271 +uint ReferenceProcessor::count_jni_refs() { 1.272 + class AlwaysAliveClosure: public BoolObjectClosure { 1.273 + public: 1.274 + virtual bool do_object_b(oop obj) { return true; } 1.275 + }; 1.276 + 1.277 + class CountHandleClosure: public OopClosure { 1.278 + private: 1.279 + int _count; 1.280 + public: 1.281 + CountHandleClosure(): _count(0) {} 1.282 + void do_oop(oop* unused) { _count++; } 1.283 + void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 1.284 + int count() { return _count; } 1.285 + }; 1.286 + CountHandleClosure global_handle_count; 1.287 + AlwaysAliveClosure always_alive; 1.288 + JNIHandles::weak_oops_do(&always_alive, &global_handle_count); 1.289 + return global_handle_count.count(); 1.290 +} 1.291 +#endif 1.292 + 1.293 +void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive, 1.294 + OopClosure* keep_alive, 1.295 + VoidClosure* complete_gc) { 1.296 +#ifndef PRODUCT 1.297 + if (PrintGCDetails && PrintReferenceGC) { 1.298 + unsigned int count = count_jni_refs(); 1.299 + gclog_or_tty->print(", %u refs", count); 1.300 + } 1.301 +#endif 1.302 + JNIHandles::weak_oops_do(is_alive, keep_alive); 1.303 + complete_gc->do_void(); 1.304 +} 1.305 + 1.306 + 1.307 +template <class T> 1.308 +bool enqueue_discovered_ref_helper(ReferenceProcessor* ref, 1.309 + AbstractRefProcTaskExecutor* task_executor) { 1.310 + 1.311 + // Remember old value of pending references list 1.312 + T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr(); 1.313 + T old_pending_list_value = *pending_list_addr; 1.314 
+ 1.315 + // Enqueue references that are not made active again, and 1.316 + // clear the decks for the next collection (cycle). 1.317 + ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); 1.318 + // Do the post-barrier on pending_list_addr missed in 1.319 + // enqueue_discovered_reflist. 1.320 + oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); 1.321 + 1.322 + // Stop treating discovered references specially. 1.323 + ref->disable_discovery(); 1.324 + 1.325 + // Return true if new pending references were added 1.326 + return old_pending_list_value != *pending_list_addr; 1.327 +} 1.328 + 1.329 +bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { 1.330 + NOT_PRODUCT(verify_ok_to_handle_reflists()); 1.331 + if (UseCompressedOops) { 1.332 + return enqueue_discovered_ref_helper<narrowOop>(this, task_executor); 1.333 + } else { 1.334 + return enqueue_discovered_ref_helper<oop>(this, task_executor); 1.335 + } 1.336 +} 1.337 + 1.338 +void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, 1.339 + HeapWord* pending_list_addr) { 1.340 + // Given a list of refs linked through the "discovered" field 1.341 + // (java.lang.ref.Reference.discovered), self-loop their "next" field 1.342 + // thus distinguishing them from active References, then 1.343 + // prepend them to the pending list. 1.344 + // 1.345 + // The Java threads will see the Reference objects linked together through 1.346 + // the discovered field. Instead of trying to do the write barrier updates 1.347 + // in all places in the reference processor where we manipulate the discovered 1.348 + // field we make sure to do the barrier here where we anyway iterate through 1.349 + // all linked Reference objects. Note that it is important to not dirty any 1.350 + // cards during reference processing since this will cause card table 1.351 + // verification to fail for G1. 
1.352 + // 1.353 + // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777), 1.354 + // the "next" field is used to chain the pending list, not the discovered 1.355 + // field. 1.356 + if (TraceReferenceGC && PrintGCDetails) { 1.357 + gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " 1.358 + INTPTR_FORMAT, (address)refs_list.head()); 1.359 + } 1.360 + 1.361 + oop obj = NULL; 1.362 + oop next_d = refs_list.head(); 1.363 + if (pending_list_uses_discovered_field()) { // New behavior 1.364 + // Walk down the list, self-looping the next field 1.365 + // so that the References are not considered active. 1.366 + while (obj != next_d) { 1.367 + obj = next_d; 1.368 + assert(obj->is_instanceRef(), "should be reference object"); 1.369 + next_d = java_lang_ref_Reference::discovered(obj); 1.370 + if (TraceReferenceGC && PrintGCDetails) { 1.371 + gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, 1.372 + (void *)obj, (void *)next_d); 1.373 + } 1.374 + assert(java_lang_ref_Reference::next(obj) == NULL, 1.375 + "Reference not active; should not be discovered"); 1.376 + // Self-loop next, so as to make Ref not active. 1.377 + java_lang_ref_Reference::set_next_raw(obj, obj); 1.378 + if (next_d != obj) { 1.379 + oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d); 1.380 + } else { 1.381 + // This is the last object. 1.382 + // Swap refs_list into pending_list_addr and 1.383 + // set obj's discovered to what we read from pending_list_addr. 1.384 + oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); 1.385 + // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above. 
1.386 + java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL 1.387 + oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); 1.388 + } 1.389 + } 1.390 + } else { // Old behaviour 1.391 + // Walk down the list, copying the discovered field into 1.392 + // the next field and clearing the discovered field. 1.393 + while (obj != next_d) { 1.394 + obj = next_d; 1.395 + assert(obj->is_instanceRef(), "should be reference object"); 1.396 + next_d = java_lang_ref_Reference::discovered(obj); 1.397 + if (TraceReferenceGC && PrintGCDetails) { 1.398 + gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, 1.399 + (void *)obj, (void *)next_d); 1.400 + } 1.401 + assert(java_lang_ref_Reference::next(obj) == NULL, 1.402 + "The reference should not be enqueued"); 1.403 + if (next_d == obj) { // obj is last 1.404 + // Swap refs_list into pending_list_addr and 1.405 + // set obj's next to what we read from pending_list_addr. 1.406 + oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); 1.407 + // Need oop_check on pending_list_addr above; 1.408 + // see special oop-check code at the end of 1.409 + // enqueue_discovered_reflists() further below. 1.410 + if (old == NULL) { 1.411 + // obj should be made to point to itself, since 1.412 + // pending list was empty. 
1.413 + java_lang_ref_Reference::set_next(obj, obj); 1.414 + } else { 1.415 + java_lang_ref_Reference::set_next(obj, old); 1.416 + } 1.417 + } else { 1.418 + java_lang_ref_Reference::set_next(obj, next_d); 1.419 + } 1.420 + java_lang_ref_Reference::set_discovered(obj, (oop) NULL); 1.421 + } 1.422 + } 1.423 +} 1.424 + 1.425 +// Parallel enqueue task 1.426 +class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 1.427 +public: 1.428 + RefProcEnqueueTask(ReferenceProcessor& ref_processor, 1.429 + DiscoveredList discovered_refs[], 1.430 + HeapWord* pending_list_addr, 1.431 + int n_queues) 1.432 + : EnqueueTask(ref_processor, discovered_refs, 1.433 + pending_list_addr, n_queues) 1.434 + { } 1.435 + 1.436 + virtual void work(unsigned int work_id) { 1.437 + assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); 1.438 + // Simplest first cut: static partitioning. 1.439 + int index = work_id; 1.440 + // The increment on "index" must correspond to the maximum number of queues 1.441 + // (n_queues) with which that ReferenceProcessor was created. That 1.442 + // is because of the "clever" way the discovered references lists were 1.443 + // allocated and are indexed into. 
1.444 + assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); 1.445 + for (int j = 0; 1.446 + j < ReferenceProcessor::number_of_subclasses_of_ref(); 1.447 + j++, index += _n_queues) { 1.448 + _ref_processor.enqueue_discovered_reflist( 1.449 + _refs_lists[index], _pending_list_addr); 1.450 + _refs_lists[index].set_head(NULL); 1.451 + _refs_lists[index].set_length(0); 1.452 + } 1.453 + } 1.454 +}; 1.455 + 1.456 +// Enqueue references that are not made active again 1.457 +void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, 1.458 + AbstractRefProcTaskExecutor* task_executor) { 1.459 + if (_processing_is_mt && task_executor != NULL) { 1.460 + // Parallel code 1.461 + RefProcEnqueueTask tsk(*this, _discovered_refs, 1.462 + pending_list_addr, _max_num_q); 1.463 + task_executor->execute(tsk); 1.464 + } else { 1.465 + // Serial code: call the parent class's implementation 1.466 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.467 + enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr); 1.468 + _discovered_refs[i].set_head(NULL); 1.469 + _discovered_refs[i].set_length(0); 1.470 + } 1.471 + } 1.472 +} 1.473 + 1.474 +void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 1.475 + _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); 1.476 + oop discovered = java_lang_ref_Reference::discovered(_ref); 1.477 + assert(_discovered_addr && discovered->is_oop_or_null(), 1.478 + "discovered field is bad"); 1.479 + _next = discovered; 1.480 + _referent_addr = java_lang_ref_Reference::referent_addr(_ref); 1.481 + _referent = java_lang_ref_Reference::referent(_ref); 1.482 + assert(Universe::heap()->is_in_reserved_or_null(_referent), 1.483 + "Wrong oop found in java.lang.Reference object"); 1.484 + assert(allow_null_referent ? 
1.485 + _referent->is_oop_or_null() 1.486 + : _referent->is_oop(), 1.487 + "bad referent"); 1.488 +} 1.489 + 1.490 +void DiscoveredListIterator::remove() { 1.491 + assert(_ref->is_oop(), "Dropping a bad reference"); 1.492 + oop_store_raw(_discovered_addr, NULL); 1.493 + 1.494 + // First _prev_next ref actually points into DiscoveredList (gross). 1.495 + oop new_next; 1.496 + if (_next == _ref) { 1.497 + // At the end of the list, we should make _prev point to itself. 1.498 + // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 1.499 + // and _prev will be NULL. 1.500 + new_next = _prev; 1.501 + } else { 1.502 + new_next = _next; 1.503 + } 1.504 + // Remove Reference object from discovered list. Note that G1 does not need a 1.505 + // pre-barrier here because we know the Reference has already been found/marked, 1.506 + // that's how it ended up in the discovered list in the first place. 1.507 + oop_store_raw(_prev_next, new_next); 1.508 + NOT_PRODUCT(_removed++); 1.509 + _refs_list.dec_length(1); 1.510 +} 1.511 + 1.512 +// Make the Reference object active again. 1.513 +void DiscoveredListIterator::make_active() { 1.514 + // The pre barrier for G1 is probably just needed for the old 1.515 + // reference processing behavior. Should we guard this with 1.516 + // ReferenceProcessor::pending_list_uses_discovered_field() ? 
1.517 + if (UseG1GC) { 1.518 + HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); 1.519 + if (UseCompressedOops) { 1.520 + oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL); 1.521 + } else { 1.522 + oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL); 1.523 + } 1.524 + } 1.525 + java_lang_ref_Reference::set_next_raw(_ref, NULL); 1.526 +} 1.527 + 1.528 +void DiscoveredListIterator::clear_referent() { 1.529 + oop_store_raw(_referent_addr, NULL); 1.530 +} 1.531 + 1.532 +// NOTE: process_phase*() are largely similar, and at a high level 1.533 +// merely iterate over the extant list applying a predicate to 1.534 +// each of its elements and possibly removing that element from the 1.535 +// list and applying some further closures to that element. 1.536 +// We should consider the possibility of replacing these 1.537 +// process_phase*() methods by abstracting them into 1.538 +// a single general iterator invocation that receives appropriate 1.539 +// closures that accomplish this work. 1.540 + 1.541 +// (SoftReferences only) Traverse the list and remove any SoftReferences whose 1.542 +// referents are not alive, but that should be kept alive for policy reasons. 1.543 +// Keep alive the transitive closure of all such referents. 1.544 +void 1.545 +ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 1.546 + ReferencePolicy* policy, 1.547 + BoolObjectClosure* is_alive, 1.548 + OopClosure* keep_alive, 1.549 + VoidClosure* complete_gc) { 1.550 + assert(policy != NULL, "Must have a non-NULL policy"); 1.551 + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1.552 + // Decide which softly reachable refs should be kept alive. 
1.553 + while (iter.has_next()) { 1.554 + iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 1.555 + bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 1.556 + if (referent_is_dead && 1.557 + !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 1.558 + if (TraceReferenceGC) { 1.559 + gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 1.560 + (void *)iter.obj(), iter.obj()->klass()->internal_name()); 1.561 + } 1.562 + // Remove Reference object from list 1.563 + iter.remove(); 1.564 + // Make the Reference object active again 1.565 + iter.make_active(); 1.566 + // keep the referent around 1.567 + iter.make_referent_alive(); 1.568 + iter.move_to_next(); 1.569 + } else { 1.570 + iter.next(); 1.571 + } 1.572 + } 1.573 + // Close the reachable set 1.574 + complete_gc->do_void(); 1.575 + NOT_PRODUCT( 1.576 + if (PrintGCDetails && TraceReferenceGC) { 1.577 + gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d " 1.578 + "discovered Refs by policy, from list " INTPTR_FORMAT, 1.579 + iter.removed(), iter.processed(), (address)refs_list.head()); 1.580 + } 1.581 + ) 1.582 +} 1.583 + 1.584 +// Traverse the list and remove any Refs that are not active, or 1.585 +// whose referents are either alive or NULL. 
1.586 +void 1.587 +ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 1.588 + BoolObjectClosure* is_alive, 1.589 + OopClosure* keep_alive) { 1.590 + assert(discovery_is_atomic(), "Error"); 1.591 + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1.592 + while (iter.has_next()) { 1.593 + iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 1.594 + DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 1.595 + assert(next == NULL, "Should not discover inactive Reference"); 1.596 + if (iter.is_referent_alive()) { 1.597 + if (TraceReferenceGC) { 1.598 + gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 1.599 + (void *)iter.obj(), iter.obj()->klass()->internal_name()); 1.600 + } 1.601 + // The referent is reachable after all. 1.602 + // Remove Reference object from list. 1.603 + iter.remove(); 1.604 + // Update the referent pointer as necessary: Note that this 1.605 + // should not entail any recursive marking because the 1.606 + // referent must already have been traversed. 
1.607 + iter.make_referent_alive(); 1.608 + iter.move_to_next(); 1.609 + } else { 1.610 + iter.next(); 1.611 + } 1.612 + } 1.613 + NOT_PRODUCT( 1.614 + if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) { 1.615 + gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " 1.616 + "Refs in discovered list " INTPTR_FORMAT, 1.617 + iter.removed(), iter.processed(), (address)refs_list.head()); 1.618 + } 1.619 + ) 1.620 +} 1.621 + 1.622 +void 1.623 +ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 1.624 + BoolObjectClosure* is_alive, 1.625 + OopClosure* keep_alive, 1.626 + VoidClosure* complete_gc) { 1.627 + assert(!discovery_is_atomic(), "Error"); 1.628 + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1.629 + while (iter.has_next()) { 1.630 + iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1.631 + HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); 1.632 + oop next = java_lang_ref_Reference::next(iter.obj()); 1.633 + if ((iter.referent() == NULL || iter.is_referent_alive() || 1.634 + next != NULL)) { 1.635 + assert(next->is_oop_or_null(), "bad next field"); 1.636 + // Remove Reference object from list 1.637 + iter.remove(); 1.638 + // Trace the cohorts 1.639 + iter.make_referent_alive(); 1.640 + if (UseCompressedOops) { 1.641 + keep_alive->do_oop((narrowOop*)next_addr); 1.642 + } else { 1.643 + keep_alive->do_oop((oop*)next_addr); 1.644 + } 1.645 + iter.move_to_next(); 1.646 + } else { 1.647 + iter.next(); 1.648 + } 1.649 + } 1.650 + // Now close the newly reachable set 1.651 + complete_gc->do_void(); 1.652 + NOT_PRODUCT( 1.653 + if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) { 1.654 + gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " 1.655 + "Refs in discovered list " INTPTR_FORMAT, 1.656 + iter.removed(), iter.processed(), (address)refs_list.head()); 1.657 + } 1.658 + ) 1.659 +} 1.660 + 1.661 +// Traverse the list and process the referents, by either 
1.662 +// clearing them or keeping them (and their reachable 1.663 +// closure) alive. 1.664 +void 1.665 +ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 1.666 + bool clear_referent, 1.667 + BoolObjectClosure* is_alive, 1.668 + OopClosure* keep_alive, 1.669 + VoidClosure* complete_gc) { 1.670 + ResourceMark rm; 1.671 + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1.672 + while (iter.has_next()) { 1.673 + iter.update_discovered(); 1.674 + iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 1.675 + if (clear_referent) { 1.676 + // NULL out referent pointer 1.677 + iter.clear_referent(); 1.678 + } else { 1.679 + // keep the referent around 1.680 + iter.make_referent_alive(); 1.681 + } 1.682 + if (TraceReferenceGC) { 1.683 + gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 1.684 + clear_referent ? "cleared " : "", 1.685 + (void *)iter.obj(), iter.obj()->klass()->internal_name()); 1.686 + } 1.687 + assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 1.688 + iter.next(); 1.689 + } 1.690 + // Remember to update the next pointer of the last ref. 
1.691 + iter.update_discovered(); 1.692 + // Close the reachable set 1.693 + complete_gc->do_void(); 1.694 +} 1.695 + 1.696 +void 1.697 +ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 1.698 + oop obj = NULL; 1.699 + oop next = refs_list.head(); 1.700 + while (next != obj) { 1.701 + obj = next; 1.702 + next = java_lang_ref_Reference::discovered(obj); 1.703 + java_lang_ref_Reference::set_discovered_raw(obj, NULL); 1.704 + } 1.705 + refs_list.set_head(NULL); 1.706 + refs_list.set_length(0); 1.707 +} 1.708 + 1.709 +void 1.710 +ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { 1.711 + clear_discovered_references(refs_list); 1.712 +} 1.713 + 1.714 +void ReferenceProcessor::abandon_partial_discovery() { 1.715 + // loop over the lists 1.716 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.717 + if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 1.718 + gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i)); 1.719 + } 1.720 + abandon_partial_discovered_list(_discovered_refs[i]); 1.721 + } 1.722 +} 1.723 + 1.724 +class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 1.725 +public: 1.726 + RefProcPhase1Task(ReferenceProcessor& ref_processor, 1.727 + DiscoveredList refs_lists[], 1.728 + ReferencePolicy* policy, 1.729 + bool marks_oops_alive) 1.730 + : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 1.731 + _policy(policy) 1.732 + { } 1.733 + virtual void work(unsigned int i, BoolObjectClosure& is_alive, 1.734 + OopClosure& keep_alive, 1.735 + VoidClosure& complete_gc) 1.736 + { 1.737 + Thread* thr = Thread::current(); 1.738 + int refs_list_index = ((WorkerThread*)thr)->id(); 1.739 + _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy, 1.740 + &is_alive, &keep_alive, &complete_gc); 1.741 + } 1.742 +private: 1.743 + ReferencePolicy* _policy; 1.744 +}; 1.745 + 1.746 +class RefProcPhase2Task: public 
AbstractRefProcTaskExecutor::ProcessTask { 1.747 +public: 1.748 + RefProcPhase2Task(ReferenceProcessor& ref_processor, 1.749 + DiscoveredList refs_lists[], 1.750 + bool marks_oops_alive) 1.751 + : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 1.752 + { } 1.753 + virtual void work(unsigned int i, BoolObjectClosure& is_alive, 1.754 + OopClosure& keep_alive, 1.755 + VoidClosure& complete_gc) 1.756 + { 1.757 + _ref_processor.process_phase2(_refs_lists[i], 1.758 + &is_alive, &keep_alive, &complete_gc); 1.759 + } 1.760 +}; 1.761 + 1.762 +class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 1.763 +public: 1.764 + RefProcPhase3Task(ReferenceProcessor& ref_processor, 1.765 + DiscoveredList refs_lists[], 1.766 + bool clear_referent, 1.767 + bool marks_oops_alive) 1.768 + : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 1.769 + _clear_referent(clear_referent) 1.770 + { } 1.771 + virtual void work(unsigned int i, BoolObjectClosure& is_alive, 1.772 + OopClosure& keep_alive, 1.773 + VoidClosure& complete_gc) 1.774 + { 1.775 + // Don't use "refs_list_index" calculated in this way because 1.776 + // balance_queues() has moved the Ref's into the first n queues. 1.777 + // Thread* thr = Thread::current(); 1.778 + // int refs_list_index = ((WorkerThread*)thr)->id(); 1.779 + // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent, 1.780 + _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 1.781 + &is_alive, &keep_alive, &complete_gc); 1.782 + } 1.783 +private: 1.784 + bool _clear_referent; 1.785 +}; 1.786 + 1.787 +// Balances reference queues. 1.788 +// Move entries from all queues[0, 1, ..., _max_num_q-1] to 1.789 +// queues[0, 1, ..., _num_q-1] because only the first _num_q 1.790 +// corresponding to the active workers will be processed. 
1.791 +void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 1.792 +{ 1.793 + // calculate total length 1.794 + size_t total_refs = 0; 1.795 + if (TraceReferenceGC && PrintGCDetails) { 1.796 + gclog_or_tty->print_cr("\nBalance ref_lists "); 1.797 + } 1.798 + 1.799 + for (uint i = 0; i < _max_num_q; ++i) { 1.800 + total_refs += ref_lists[i].length(); 1.801 + if (TraceReferenceGC && PrintGCDetails) { 1.802 + gclog_or_tty->print("%d ", ref_lists[i].length()); 1.803 + } 1.804 + } 1.805 + if (TraceReferenceGC && PrintGCDetails) { 1.806 + gclog_or_tty->print_cr(" = %d", total_refs); 1.807 + } 1.808 + size_t avg_refs = total_refs / _num_q + 1; 1.809 + uint to_idx = 0; 1.810 + for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) { 1.811 + bool move_all = false; 1.812 + if (from_idx >= _num_q) { 1.813 + move_all = ref_lists[from_idx].length() > 0; 1.814 + } 1.815 + while ((ref_lists[from_idx].length() > avg_refs) || 1.816 + move_all) { 1.817 + assert(to_idx < _num_q, "Sanity Check!"); 1.818 + if (ref_lists[to_idx].length() < avg_refs) { 1.819 + // move superfluous refs 1.820 + size_t refs_to_move; 1.821 + // Move all the Ref's if the from queue will not be processed. 1.822 + if (move_all) { 1.823 + refs_to_move = MIN2(ref_lists[from_idx].length(), 1.824 + avg_refs - ref_lists[to_idx].length()); 1.825 + } else { 1.826 + refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 1.827 + avg_refs - ref_lists[to_idx].length()); 1.828 + } 1.829 + 1.830 + assert(refs_to_move > 0, "otherwise the code below will fail"); 1.831 + 1.832 + oop move_head = ref_lists[from_idx].head(); 1.833 + oop move_tail = move_head; 1.834 + oop new_head = move_head; 1.835 + // find an element to split the list on 1.836 + for (size_t j = 0; j < refs_to_move; ++j) { 1.837 + move_tail = new_head; 1.838 + new_head = java_lang_ref_Reference::discovered(new_head); 1.839 + } 1.840 + 1.841 + // Add the chain to the to list. 
1.842 + if (ref_lists[to_idx].head() == NULL) { 1.843 + // to list is empty. Make a loop at the end. 1.844 + java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 1.845 + } else { 1.846 + java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 1.847 + } 1.848 + ref_lists[to_idx].set_head(move_head); 1.849 + ref_lists[to_idx].inc_length(refs_to_move); 1.850 + 1.851 + // Remove the chain from the from list. 1.852 + if (move_tail == new_head) { 1.853 + // We found the end of the from list. 1.854 + ref_lists[from_idx].set_head(NULL); 1.855 + } else { 1.856 + ref_lists[from_idx].set_head(new_head); 1.857 + } 1.858 + ref_lists[from_idx].dec_length(refs_to_move); 1.859 + if (ref_lists[from_idx].length() == 0) { 1.860 + break; 1.861 + } 1.862 + } else { 1.863 + to_idx = (to_idx + 1) % _num_q; 1.864 + } 1.865 + } 1.866 + } 1.867 +#ifdef ASSERT 1.868 + size_t balanced_total_refs = 0; 1.869 + for (uint i = 0; i < _max_num_q; ++i) { 1.870 + balanced_total_refs += ref_lists[i].length(); 1.871 + if (TraceReferenceGC && PrintGCDetails) { 1.872 + gclog_or_tty->print("%d ", ref_lists[i].length()); 1.873 + } 1.874 + } 1.875 + if (TraceReferenceGC && PrintGCDetails) { 1.876 + gclog_or_tty->print_cr(" = %d", balanced_total_refs); 1.877 + gclog_or_tty->flush(); 1.878 + } 1.879 + assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 1.880 +#endif 1.881 +} 1.882 + 1.883 +void ReferenceProcessor::balance_all_queues() { 1.884 + balance_queues(_discoveredSoftRefs); 1.885 + balance_queues(_discoveredWeakRefs); 1.886 + balance_queues(_discoveredFinalRefs); 1.887 + balance_queues(_discoveredPhantomRefs); 1.888 +} 1.889 + 1.890 +size_t 1.891 +ReferenceProcessor::process_discovered_reflist( 1.892 + DiscoveredList refs_lists[], 1.893 + ReferencePolicy* policy, 1.894 + bool clear_referent, 1.895 + BoolObjectClosure* is_alive, 1.896 + OopClosure* keep_alive, 1.897 + VoidClosure* complete_gc, 1.898 + AbstractRefProcTaskExecutor* task_executor) 
1.899 +{ 1.900 + bool mt_processing = task_executor != NULL && _processing_is_mt; 1.901 + // If discovery used MT and a dynamic number of GC threads, then 1.902 + // the queues must be balanced for correctness if fewer than the 1.903 + // maximum number of queues were used. The number of queue used 1.904 + // during discovery may be different than the number to be used 1.905 + // for processing so don't depend of _num_q < _max_num_q as part 1.906 + // of the test. 1.907 + bool must_balance = _discovery_is_mt; 1.908 + 1.909 + if ((mt_processing && ParallelRefProcBalancingEnabled) || 1.910 + must_balance) { 1.911 + balance_queues(refs_lists); 1.912 + } 1.913 + 1.914 + size_t total_list_count = total_count(refs_lists); 1.915 + 1.916 + if (PrintReferenceGC && PrintGCDetails) { 1.917 + gclog_or_tty->print(", %u refs", total_list_count); 1.918 + } 1.919 + 1.920 + // Phase 1 (soft refs only): 1.921 + // . Traverse the list and remove any SoftReferences whose 1.922 + // referents are not alive, but that should be kept alive for 1.923 + // policy reasons. Keep alive the transitive closure of all 1.924 + // such referents. 1.925 + if (policy != NULL) { 1.926 + if (mt_processing) { 1.927 + RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); 1.928 + task_executor->execute(phase1); 1.929 + } else { 1.930 + for (uint i = 0; i < _max_num_q; i++) { 1.931 + process_phase1(refs_lists[i], policy, 1.932 + is_alive, keep_alive, complete_gc); 1.933 + } 1.934 + } 1.935 + } else { // policy == NULL 1.936 + assert(refs_lists != _discoveredSoftRefs, 1.937 + "Policy must be specified for soft references."); 1.938 + } 1.939 + 1.940 + // Phase 2: 1.941 + // . Traverse the list and remove any refs whose referents are alive. 
1.942 + if (mt_processing) { 1.943 + RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); 1.944 + task_executor->execute(phase2); 1.945 + } else { 1.946 + for (uint i = 0; i < _max_num_q; i++) { 1.947 + process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 1.948 + } 1.949 + } 1.950 + 1.951 + // Phase 3: 1.952 + // . Traverse the list and process referents as appropriate. 1.953 + if (mt_processing) { 1.954 + RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); 1.955 + task_executor->execute(phase3); 1.956 + } else { 1.957 + for (uint i = 0; i < _max_num_q; i++) { 1.958 + process_phase3(refs_lists[i], clear_referent, 1.959 + is_alive, keep_alive, complete_gc); 1.960 + } 1.961 + } 1.962 + 1.963 + return total_list_count; 1.964 +} 1.965 + 1.966 +void ReferenceProcessor::clean_up_discovered_references() { 1.967 + // loop over the lists 1.968 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.969 + if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 1.970 + gclog_or_tty->print_cr( 1.971 + "\nScrubbing %s discovered list of Null referents", 1.972 + list_name(i)); 1.973 + } 1.974 + clean_up_discovered_reflist(_discovered_refs[i]); 1.975 + } 1.976 +} 1.977 + 1.978 +void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { 1.979 + assert(!discovery_is_atomic(), "Else why call this method?"); 1.980 + DiscoveredListIterator iter(refs_list, NULL, NULL); 1.981 + while (iter.has_next()) { 1.982 + iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1.983 + oop next = java_lang_ref_Reference::next(iter.obj()); 1.984 + assert(next->is_oop_or_null(), "bad next field"); 1.985 + // If referent has been cleared or Reference is not active, 1.986 + // drop it. 
1.987 + if (iter.referent() == NULL || next != NULL) { 1.988 + debug_only( 1.989 + if (PrintGCDetails && TraceReferenceGC) { 1.990 + gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: " 1.991 + INTPTR_FORMAT " with next field: " INTPTR_FORMAT 1.992 + " and referent: " INTPTR_FORMAT, 1.993 + (void *)iter.obj(), (void *)next, (void *)iter.referent()); 1.994 + } 1.995 + ) 1.996 + // Remove Reference object from list 1.997 + iter.remove(); 1.998 + iter.move_to_next(); 1.999 + } else { 1.1000 + iter.next(); 1.1001 + } 1.1002 + } 1.1003 + NOT_PRODUCT( 1.1004 + if (PrintGCDetails && TraceReferenceGC) { 1.1005 + gclog_or_tty->print( 1.1006 + " Removed %d Refs with NULL referents out of %d discovered Refs", 1.1007 + iter.removed(), iter.processed()); 1.1008 + } 1.1009 + ) 1.1010 +} 1.1011 + 1.1012 +inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 1.1013 + uint id = 0; 1.1014 + // Determine the queue index to use for this object. 1.1015 + if (_discovery_is_mt) { 1.1016 + // During a multi-threaded discovery phase, 1.1017 + // each thread saves to its "own" list. 1.1018 + Thread* thr = Thread::current(); 1.1019 + id = thr->as_Worker_thread()->id(); 1.1020 + } else { 1.1021 + // single-threaded discovery, we save in round-robin 1.1022 + // fashion to each of the lists. 
1.1023 + if (_processing_is_mt) { 1.1024 + id = next_id(); 1.1025 + } 1.1026 + } 1.1027 + assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)"); 1.1028 + 1.1029 + // Get the discovered queue to which we will add 1.1030 + DiscoveredList* list = NULL; 1.1031 + switch (rt) { 1.1032 + case REF_OTHER: 1.1033 + // Unknown reference type, no special treatment 1.1034 + break; 1.1035 + case REF_SOFT: 1.1036 + list = &_discoveredSoftRefs[id]; 1.1037 + break; 1.1038 + case REF_WEAK: 1.1039 + list = &_discoveredWeakRefs[id]; 1.1040 + break; 1.1041 + case REF_FINAL: 1.1042 + list = &_discoveredFinalRefs[id]; 1.1043 + break; 1.1044 + case REF_PHANTOM: 1.1045 + list = &_discoveredPhantomRefs[id]; 1.1046 + break; 1.1047 + case REF_NONE: 1.1048 + // we should not reach here if we are an InstanceRefKlass 1.1049 + default: 1.1050 + ShouldNotReachHere(); 1.1051 + } 1.1052 + if (TraceReferenceGC && PrintGCDetails) { 1.1053 + gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list); 1.1054 + } 1.1055 + return list; 1.1056 +} 1.1057 + 1.1058 +inline void 1.1059 +ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 1.1060 + oop obj, 1.1061 + HeapWord* discovered_addr) { 1.1062 + assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 1.1063 + // First we must make sure this object is only enqueued once. CAS in a non null 1.1064 + // discovered_addr. 1.1065 + oop current_head = refs_list.head(); 1.1066 + // The last ref must have its discovered field pointing to itself. 1.1067 + oop next_discovered = (current_head != NULL) ? current_head : obj; 1.1068 + 1.1069 + oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, 1.1070 + NULL); 1.1071 + if (retest == NULL) { 1.1072 + // This thread just won the right to enqueue the object. 1.1073 + // We have separate lists for enqueueing, so no synchronization 1.1074 + // is necessary. 
1.1075 + refs_list.set_head(obj); 1.1076 + refs_list.inc_length(1); 1.1077 + 1.1078 + if (TraceReferenceGC) { 1.1079 + gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 1.1080 + (void *)obj, obj->klass()->internal_name()); 1.1081 + } 1.1082 + } else { 1.1083 + // If retest was non NULL, another thread beat us to it: 1.1084 + // The reference has already been discovered... 1.1085 + if (TraceReferenceGC) { 1.1086 + gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)", 1.1087 + (void *)obj, obj->klass()->internal_name()); 1.1088 + } 1.1089 + } 1.1090 +} 1.1091 + 1.1092 +#ifndef PRODUCT 1.1093 +// Non-atomic (i.e. concurrent) discovery might allow us 1.1094 +// to observe j.l.References with NULL referents, being those 1.1095 +// cleared concurrently by mutators during (or after) discovery. 1.1096 +void ReferenceProcessor::verify_referent(oop obj) { 1.1097 + bool da = discovery_is_atomic(); 1.1098 + oop referent = java_lang_ref_Reference::referent(obj); 1.1099 + assert(da ? referent->is_oop() : referent->is_oop_or_null(), 1.1100 + err_msg("Bad referent " INTPTR_FORMAT " found in Reference " 1.1101 + INTPTR_FORMAT " during %satomic discovery ", 1.1102 + (void *)referent, (void *)obj, da ? "" : "non-")); 1.1103 +} 1.1104 +#endif 1.1105 + 1.1106 +// We mention two of several possible choices here: 1.1107 +// #0: if the reference object is not in the "originating generation" 1.1108 +// (or part of the heap being collected, indicated by our "span" 1.1109 +// we don't treat it specially (i.e. we scan it as we would 1.1110 +// a normal oop, treating its references as strong references). 1.1111 +// This means that references can't be discovered unless their 1.1112 +// referent is also in the same span. This is the simplest, 1.1113 +// most "local" and most conservative approach, albeit one 1.1114 +// that may cause weak references to be enqueued least promptly. 
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
//
// Returns true iff the Reference was discovered (added to a
// discovered list) by this collector.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  GCTimer* gc_timer) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Each subclass is handled in turn; yielding between queues stops
  // the whole preclean (we return, not continue).

  // Soft references
  {
    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
1.1322 +void 1.1323 +ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1.1324 + BoolObjectClosure* is_alive, 1.1325 + OopClosure* keep_alive, 1.1326 + VoidClosure* complete_gc, 1.1327 + YieldClosure* yield) { 1.1328 + DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1.1329 + while (iter.has_next()) { 1.1330 + iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1.1331 + oop obj = iter.obj(); 1.1332 + oop next = java_lang_ref_Reference::next(obj); 1.1333 + if (iter.referent() == NULL || iter.is_referent_alive() || 1.1334 + next != NULL) { 1.1335 + // The referent has been cleared, or is alive, or the Reference is not 1.1336 + // active; we need to trace and mark its cohort. 1.1337 + if (TraceReferenceGC) { 1.1338 + gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1.1339 + (void *)iter.obj(), iter.obj()->klass()->internal_name()); 1.1340 + } 1.1341 + // Remove Reference object from list 1.1342 + iter.remove(); 1.1343 + // Keep alive its cohort. 
1.1344 + iter.make_referent_alive(); 1.1345 + if (UseCompressedOops) { 1.1346 + narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1.1347 + keep_alive->do_oop(next_addr); 1.1348 + } else { 1.1349 + oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1.1350 + keep_alive->do_oop(next_addr); 1.1351 + } 1.1352 + iter.move_to_next(); 1.1353 + } else { 1.1354 + iter.next(); 1.1355 + } 1.1356 + } 1.1357 + // Close the reachable set 1.1358 + complete_gc->do_void(); 1.1359 + 1.1360 + NOT_PRODUCT( 1.1361 + if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) { 1.1362 + gclog_or_tty->print_cr(" Dropped %d Refs out of %d " 1.1363 + "Refs in discovered list " INTPTR_FORMAT, 1.1364 + iter.removed(), iter.processed(), (address)refs_list.head()); 1.1365 + } 1.1366 + ) 1.1367 +} 1.1368 + 1.1369 +const char* ReferenceProcessor::list_name(uint i) { 1.1370 + assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(), 1.1371 + "Out of bounds index"); 1.1372 + 1.1373 + int j = i / _max_num_q; 1.1374 + switch (j) { 1.1375 + case 0: return "SoftRef"; 1.1376 + case 1: return "WeakRef"; 1.1377 + case 2: return "FinalRef"; 1.1378 + case 3: return "PhantomRef"; 1.1379 + } 1.1380 + ShouldNotReachHere(); 1.1381 + return NULL; 1.1382 +} 1.1383 + 1.1384 +#ifndef PRODUCT 1.1385 +void ReferenceProcessor::verify_ok_to_handle_reflists() { 1.1386 + // empty for now 1.1387 +} 1.1388 +#endif 1.1389 + 1.1390 +#ifndef PRODUCT 1.1391 +void ReferenceProcessor::clear_discovered_references() { 1.1392 + guarantee(!_discovering_refs, "Discovering refs?"); 1.1393 + for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1.1394 + clear_discovered_references(_discovered_refs[i]); 1.1395 + } 1.1396 +} 1.1397 + 1.1398 +#endif // PRODUCT