Fri, 15 Apr 2011 09:36:28 -0400
7032407: Crash in LinkResolver::runtime_resolve_virtual_method()
Summary: Make CDS reorder vtables so that dump-time vtables match the run-time order; then, when RedefineClasses reinitializes them, they aren't in the wrong order.
Reviewed-by: dcubed, acorn
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
oop ReferenceProcessor::_sentinelRef = NULL;
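// "subclasses_of_ref" counts the java.lang.ref.Reference subclasses for which
// discovered lists are kept (Soft, Weak, Final, Phantom); the ReferenceType
// enum lists these four values consecutively after REF_OTHER, so the
// difference below evaluates to 4.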
const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;

// List of discovered references.
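// The list is singly linked through the Reference.discovered field and is
// terminated by the sentinel rather than by NULL, so that an empty list can
// be distinguished from a NULL (i.e. not-yet-discovered) "discovered" field.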
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length() { return _len; }
  void set_length(size_t len) { _len = len; }
  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
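  // COMPILER2_PRESENT / NOT_COMPILER2 expand to their argument only when the
  // C2 compiler is (respectively, is not) built into this VM, so exactly one
  // of the two policies below is instantiated.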
  _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                 NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       int       mt_processing_degree,
                                       bool      mt_discovery,
                                       int       mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt = mt_discovery;
  _num_q = MAX2(1, mt_processing_degree);
  _max_num_q = MAX2(_num_q, mt_discovery_degree);
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
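  // The single allocation above is carved into four consecutive slices of
  // _max_num_q lists each, one slice per Reference subclass; list i of
  // subclass s therefore lives at _discoveredSoftRefs[s * _max_num_q + i].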
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written; see #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}
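
// Templating on T lets this helper read and swap the pending-list head with
// the width actually used in the heap: T is narrowOop when UseCompressedOops
// is set and oop otherwise (see enqueue_discovered_references below).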
template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
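  // For instance (illustrative only): a discovered chain R1 -> R2 -> sentinel
  // prepended onto a pending list headed by P yields the pending list
  // R1 -> R2 -> P, with the links now carried by the "next" fields and the
  // "discovered" fields cleared to NULL.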
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < subclasses_of_ref;
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
           _referent->is_oop_or_null()
         : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
                             "discovered Refs by policy list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
                             "Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
                             "Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to keep sentinel pointer around
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
                             list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
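  // The per-queue target is set slightly above the true average (integer
  // division plus one) so that the redistribution loop below always
  // terminates: the first _num_q queues can then absorb the entire total
  // without leaving any surplus unplaced.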
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may be different from the number to be used
  // for processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", %u refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
                                 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
                                 " and referent: " INTPTR_FORMAT,
                                 iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    if (UseCompressedOops) {
      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
    } else {
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    guarantee(false, "Need to check non-G1 collector");
  }
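  // Try to install current_head in the discovered field iff that field still
  // holds NULL; a non-NULL return value means another thread's CAS won and
  // the reference is already on some discovered list.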
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"),
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  bool               should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
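  // Note: "&&" binds tighter than "||", so each line of the disjunction
  // below is a complete conjunct; no extra parentheses are needed.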
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
                             "Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
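  // Each Reference subclass owns a contiguous block of _max_num_q lists,
  // so dividing by _max_num_q recovers the subclass index.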
  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT