Wed, 02 Jul 2008 12:55:16 -0700
6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_referenceProcessor.cpp.incl"

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

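// Illustrative sketch (not compiled here): how a DiscoveredList round-trips
// its head under either oop representation. sentinel_ref() is assumed to
// have been installed already, as done in init_statics() below.
//
//   DiscoveredList list;
//   list.set_head(ReferenceProcessor::sentinel_ref());  // an empty list
//   assert(list.empty(), "sentinel head means empty");
//   list.set_head(ref);     // stored as a narrowOop iff UseCompressedOops
//   oop h = list.head();    // decoded back; h == ref either way
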
oop ReferenceProcessor::_sentinelRef = NULL;

const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion          span,
                                         bool               atomic_discovery,
                                         bool               mt_discovery,
                                         BoolObjectClosure* is_alive_non_header,
                                         int                parallel_gc_threads,
                                         bool               mt_processing) {
  int mt_degree = 1;
  if (parallel_gc_threads > 1) {
    mt_degree = parallel_gc_threads;
  }
  ReferenceProcessor* rp =
    new ReferenceProcessor(span, atomic_discovery,
                           mt_discovery, mt_degree,
                           mt_processing && (parallel_gc_threads > 0));
  if (rp == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
  rp->set_is_alive_non_header(is_alive_non_header);
  return rp;
}

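// Illustrative usage (a hypothetical caller, not part of this file): a
// collector would typically construct its processor once at heap setup,
// roughly along these lines.
//
//   ReferenceProcessor* rp =
//     ReferenceProcessor::create_ref_processor(gen->reserved(), // span
//                                              true,   // atomic discovery
//                                              false,  // single-threaded discovery
//                                              NULL,   // no extra liveness closure
//                                              ParallelGCThreads,
//                                              ParallelRefProcEnabled);
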
ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      atomic_discovery,
                                       bool      mt_discovery,
                                       int       mt_degree,
                                       bool      mt_processing) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = mt_degree;
  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef.
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}

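// The discovered lists live in one C-heap allocation, laid out by subclass
// and then by queue index; for example, with _num_q == 2:
//
//   index:  0      1      2      3      4      5      6      7
//   list:   soft0  soft1  weak0  weak1  final0 final1 phant0 phant1
//
// _discoveredWeakRefs and friends are interior pointers into this array,
// which is why loops below can walk all _num_q * subclasses_of_ref lists
// starting from _discoveredSoftRefs.
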
#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  ReferencePolicy*             policy,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written; see #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

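// To summarize the flow above: each subclass list is handled by
// process_discovered_reflist() below, which runs up to three phases:
//   phase 1 (soft refs only): drop refs whose referents the policy says
//            to keep alive, marking those referents;
//   phase 2: drop refs whose referents are still strongly reachable;
//   phase 3: for the survivors, clear or mark the referent and leave the
//            ref on its list, to be spliced onto the pending list later
//            by enqueue_discovered_references().
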
#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}

template <class T>
static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                          AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

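// The helper above is a template so that the pending-list head is read,
// compared and stored at its native width. An illustrative expansion for
// the compressed case (a sketch, mirroring the code above):
//
//   narrowOop* addr = (narrowOop*)java_lang_ref_Reference::pending_list_addr();
//   narrowOop  old  = *addr;                      // old compressed head
//   ... enqueue_discovered_reflists((HeapWord*)addr, task_executor) ...
//   return old != *addr;                          // head changed => new pending refs
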
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

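// An illustrative picture of the splice performed above, for a discovered
// list (r1, r2, r3) prepended onto a pending list whose old head was p:
//
//   before:  r1 -discovered-> r2 -discovered-> r3 -discovered-> sentinel
//   after:   r1 -next-> r2 -next-> r3 -next-> p   (pending head is now r1)
//
// All discovered fields end up NULL; if the pending list was empty, r3's
// next is set to r3 itself, so it is still distinguishable from an active
// (next == NULL) Reference.
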
// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently or interleaved with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list and move to the next.
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

private:
  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  move_to_next();
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

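// The canonical traversal idiom used by the process_phase*() methods below
// (a sketch; whether a NULL referent is allowed depends on whether the
// discovery that built the list was atomic):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(allow_null_referent));
//     if (/* ref should be dropped */) {
//       iter.remove();   // unlink from the list and advance
//     } else {
//       iter.next();     // keep on the list and advance
//     }
//   }
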
// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      // Remove Reference object from list
      iter.remove();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
                          " discovered Refs by policy ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      // Remove Reference object from list
      iter.remove();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    // If discovery is concurrent, we may have objects with null referents,
    // being those that were concurrently cleared after they were discovered
    // (and not subsequently precleaned).
    assert(   (discovery_is_atomic() && iter.referent()->is_oop())
           || (!discovery_is_atomic() && iter.referent()->is_oop_or_null(UseConcMarkSweepGC)),
           "Adding a bad referent");
    iter.next();
  }
  // Remember to keep sentinel pointer around
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]) {
  for (int i = 0; i < _num_q; i++) {
    abandon_partial_discovered_list(refs_lists[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase1(_refs_lists[i], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  for (int i = 0; i < _num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _num_q; from_idx++) {
    while (ref_lists[from_idx].length() > avg_refs) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move =
          MIN2(ref_lists[from_idx].length() - avg_refs,
               avg_refs - ref_lists[to_idx].length());
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move);
      } else {
        ++to_idx;
      }
    }
  }
}

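// Worked example (a sketch): with _num_q == 4 and list lengths (12, 2, 1, 1),
// total_refs == 16 and avg_refs == 16/4 + 1 == 5. The loop first moves 3 refs
// from list 0 to list 1, then 4 more from list 0 to list 2, leaving lengths
// (5, 5, 5, 1): no list remains longer than avg_refs, though short lists are
// not topped up.
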
void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt = task_executor != NULL && _processing_is_mt;
  if (mt && ParallelRefProcBalancingEnabled) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  size_t length = refs_list.length();
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
                                 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
                                 " and referent: " INTPTR_FORMAT,
                                 iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      --length;
    } else {
      iter.next();
    }
  }
  refs_list.set_length(length);
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed " SIZE_FORMAT " Refs with NULL referents out of " SIZE_FORMAT
        " discovered Refs", iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    assert(thr->is_GC_task_thread(),
           "Dubious cast from Thread* to WorkerThread*?");
    id = ((WorkerThread*)thr)->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  return list;
}

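// Equivalently (a sketch of the indexing): since the four subclass arrays
// are contiguous slices of one allocation (see the constructor above), the
// list chosen for the four discoverable types is
//
//   &_discoveredSoftRefs[(rt - REF_SOFT) * _num_q + id]
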
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once: CAS a
  // non-NULL value into the discovered field at discovered_addr.
  oop retest = oopDesc::atomic_compare_exchange_oop(refs_list.head(), discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.set_length(refs_list.length() + 1);
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

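// Illustrative race (two discovering threads, one Reference r): both read
// r.discovered == NULL and race to CAS their own list head into it. Exactly
// one CAS sees NULL and wins; retest == NULL tells the winner to link r onto
// its list. The loser observes the winner's value in retest and leaves r
// alone. No lock is needed because each thread prepends only to its own
// DiscoveredList.
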
// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    oop referent = java_lang_ref_Reference::referent(obj);
    // We'd like to assert the following:
    // assert(referent != NULL, "Refs with null referents already filtered");
    // However, since this code may be executed concurrently with
    // mutators, which can clear() the referent, it is not
    // guaranteed that the referent is non-NULL.
    if (is_alive_non_header()->do_object_b(referent)) {
      return false;  // referent is reachable
    }
  }

  HeapWord* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC,
             "Only possible with a concurrent collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    oop referent = java_lang_ref_Reference::referent(obj);
    assert(referent->is_oop(), "bad referent");
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() && _span.contains(referent))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;  // nothing special needs to be done
  }

  // We do a raw store here, the field will be visited later when
  // processing the discovered references.
  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    oop_store_raw(discovered_addr, list->head());
    list->set_head(obj);
    list->set_length(list->length() + 1);
  }

  // In the MT discovery case, it is currently possible to see
  // the following message multiple times if several threads
  // discover a reference at about the same time. Only one will
  // however have actually added it to the discovered queue.
  // One could let add_to_discovered_list_mt() return an
  // indication for success in queueing (by 1 thread) or
  // failure (by all other threads), but I decided the extra
  // code was not worth the effort for something that is
  // only used for debugging support.
  if (TraceReferenceGC) {
    oop referent = java_lang_ref_Reference::referent(obj);
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  return true;
}

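// Example of the two policies in a two-generation heap (a sketch, assuming
// atomic discovery): let the span be the young generation, with Reference r
// in the old generation and its referent o in the young generation. Under
// ReferenceBasedDiscovery, r is outside the span, so it is not discovered;
// it is scanned as an ordinary object and o stays strongly reachable through
// the referent field. Under ReferentBasedDiscovery, o being in the span
// allows r to be discovered, so o can be found unreachable and the reference
// enqueued sooner.
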
// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
  if (yield->should_return()) {
    return;
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
  if (yield->should_return()) {
    return;
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
  if (yield->should_return()) {
    return;
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: For this to work
// correctly, refs discovery can not be happening concurrently with this
// step.
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  size_t length = refs_list.length();
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      --length;
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
    } else {
      iter.next();
    }
  }
  refs_list.set_length(length);

  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

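// Example: with _num_q == 4, indices 0..3 map to "SoftRef", 4..7 to
// "WeakRef", 8..11 to "FinalRef" and 12..15 to "PhantomRef", mirroring
// the array layout established in the constructor.
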
#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT