/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_referenceProcessor.cpp.incl"

// List of discovered references.
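// Each list is a singly-linked chain of Reference objects threaded
// through the Reference.discovered field and terminated by the
// sentinel reference rather than by NULL.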
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

oop ReferenceProcessor::_sentinelRef = NULL;
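
// The number of discovered-list segments: one per strongly-typed
// Reference subclass (soft, weak, final, phantom); see list_name().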
const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::reference_klass())->
                 allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion          span,
                                         bool               atomic_discovery,
                                         bool               mt_discovery,
                                         BoolObjectClosure* is_alive_non_header,
                                         int                parallel_gc_threads,
                                         bool               mt_processing,
                                         bool               dl_needs_barrier) {
  int mt_degree = 1;
  if (parallel_gc_threads > 1) {
    mt_degree = parallel_gc_threads;
  }
  ReferenceProcessor* rp =
    new ReferenceProcessor(span, atomic_discovery,
                           mt_discovery, mt_degree,
                           mt_processing && (parallel_gc_threads > 0),
                           dl_needs_barrier);
  if (rp == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
  rp->set_is_alive_non_header(is_alive_non_header);
  return rp;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      atomic_discovery,
                                       bool      mt_discovery,
                                       int       mt_degree,
                                       bool      mt_processing,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(NULL),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = mt_degree;
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
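  // The flat array is carved into four consecutive segments of _num_q
  // lists each, one segment per Reference subclass.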
  _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}
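
// The discovered lists are processed in a fixed order -- soft, weak,
// final, phantom -- followed by the weak global JNI references.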
void ReferenceProcessor::process_discovered_references(
  ReferencePolicy*             policy,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written. See #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}
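
// T is oop or narrowOop, chosen by the caller according to
// UseCompressedOops, so that the pending-list slot is accessed
// with the width it actually has in the heap.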
template <class T>
static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                          AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
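    // Worker work_id takes list work_id from each of the subclasses_of_ref
    // segments: lists work_id, work_id + _n_queues, work_id + 2*_n_queues, ...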
    int index = work_id;
    for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently or interleaved with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list and move to the next.
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

private:
  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;       // address of the cell that points to _ref
  oop                _ref;             // current Reference object
  HeapWord*          _discovered_addr; // address of _ref's discovered field
  oop                _next;            // next Reference in the discovered chain
  HeapWord*          _referent_addr;   // address of _ref's referent field
  oop                _referent;        // _ref's referent
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  move_to_next();
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      // Remove Reference object from list
      iter.remove();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
                          " discovered Refs by policy ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      // Remove Reference object from list
      iter.remove();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}
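
// Variant of phase 2 for when discovery was concurrent (non-atomic):
// referents may have been concurrently cleared and References may have
// become inactive, so both the referent and the next field are traced.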
void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    // If discovery is concurrent, we may have objects with null referents,
    // being those that were concurrently cleared after they were discovered
    // (and not subsequently precleaned).
    assert(   (discovery_is_atomic() && iter.referent()->is_oop())
           || (!discovery_is_atomic() && iter.referent()->is_oop_or_null(UseConcMarkSweepGC)),
           "Adding a bad referent");
    iter.next();
  }
  // Remember to keep sentinel pointer around
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nAbandoning %s discovered list",
        list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase1(_refs_lists[i], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
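// Discovery may be unbalanced across the _num_q lists; before parallel
// processing, move entries from lists longer than the average onto
// shorter ones so that each holds roughly total_refs/_num_q References.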
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  for (int i = 0; i < _num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _num_q; from_idx++) {
    while (ref_lists[from_idx].length() > avg_refs) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move =
          MIN2(ref_lists[from_idx].length() - avg_refs,
               avg_refs - ref_lists[to_idx].length());
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move);
      } else {
        ++to_idx;
      }
    }
  }
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt = task_executor != NULL && _processing_is_mt;
  if (mt && ParallelRefProcBalancingEnabled) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}
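
// Scrub a single discovered list: drop References whose referents were
// concurrently cleared and References that are no longer active.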
void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  size_t length = refs_list.length();
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
                                 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
                                 " and referent: " INTPTR_FORMAT,
                                 iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      --length;
    } else {
      iter.next();
    }
  }
  refs_list.set_length(length);
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed " SIZE_FORMAT " Refs with NULL referents out of " SIZE_FORMAT " discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    assert(thr->is_GC_task_thread(),
           "Dubious cast from Thread* to WorkerThread*?");
    id = ((WorkerThread*)thr)->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();

  // Note: In the case of G1, this pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL, so this will expand to
  // nothing. As a result, I am just manually eliding this out for G1.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    _bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.set_length(refs_list.length() + 1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
    }

  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    oop referent = java_lang_ref_Reference::referent(obj);
    // We'd like to assert the following:
    // assert(referent != NULL, "Refs with null referents already filtered");
    // However, since this code may be executed concurrently with
    // mutators, which can clear() the referent, it is not
    // guaranteed that the referent is non-NULL.
    if (is_alive_non_header()->do_object_b(referent)) {
      return false;  // referent is reachable
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC,
             "Only possible with an incremental-update concurrent collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    oop referent = java_lang_ref_Reference::referent(obj);
    assert(referent->is_oop(), "bad referent");
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() && _span.contains(referent))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((oop*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->set_length(list->length() + 1);
  }

  // In the MT discovery case, it is currently possible to see
  // the following message multiple times if several threads
  // discover a reference about the same time. Only one will
  // however have actually added it to the discovered queue.
  // One could let add_to_discovered_list_mt() return an
  // indication for success in queueing (by 1 thread) or
  // failure (by all other threads), but I decided the extra
  // code was not worth the effort for something that is
  // only used for debugging support.
  if (TraceReferenceGC) {
    oop referent = java_lang_ref_Reference::referent(obj);
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
    BoolObjectClosure* is_alive,
    OopClosure*        keep_alive,
    VoidClosure*       complete_gc,
    YieldClosure*      yield) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
  if (yield->should_return()) {
    return;
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
  if (yield->should_return()) {
    return;
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
  if (yield->should_return()) {
    return;
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: For this to work
// correctly, refs discovery can not be happening concurrently with this
// step.
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  size_t length = refs_list.length();
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      --length;
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
    } else {
      iter.next();
    }
  }
  refs_list.set_length(length);

  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}
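
// Map a flat list index to the name of its Reference subclass:
// indices [0, _num_q) are SoftRefs, [_num_q, 2*_num_q) WeakRefs, etc.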
const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i <= _num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT