/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

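// Informal background (a summary, not taken from the original sources): the
// LRU policies implement should_clear_reference() by comparing the soft ref
// master clock against a SoftReference's timestamp; roughly, a softly
// reachable referent becomes a clearing candidate once it has gone unused
// for more than SoftRefLRUPolicyMSPerMB milliseconds per megabyte of free
// heap.
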
void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!verify_disabled || !_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    warning("time warp: "INT64_FORMAT" to "INT64_FORMAT,
            _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written. See #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}

template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.
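  //
  // An illustrative sketch of the new-behaviour transformation (not from the
  // original sources): with a discovered list r1 -> r2 -> r3 (r3 self-looped)
  // and an existing pending list headed by p:
  //
  //   before: *pending_list_addr == p
  //           r1.discovered == r2, r2.discovered == r3, r3.discovered == r3
  //           r1.next == r2.next == r3.next == NULL             (all active)
  //
  //   after:  *pending_list_addr == r1
  //           r3.discovered == p                 (old pending list appended)
  //           r1.next == r1, r2.next == r2, r3.next == r3     (all inactive)
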
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behaviour
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next(obj, obj);
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
      }
    }
  } else { // Old behaviour
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }

  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // For G1 we don't want to use set_next - it
  // will dirty the card for the next field of
  // the reference object and will fail
  // CT verification.
  if (UseG1GC) {
    BarrierSet* bs = oopDesc::bs();
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);

    if (UseCompressedOops) {
      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      bs->write_ref_field_pre((oop*)next_addr, NULL);
    }
    java_lang_ref_Reference::set_next_raw(_ref, NULL);
  } else {
    java_lang_ref_Reference::set_next(_ref, NULL);
  }
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.
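//
// A hypothetical sketch of such a unified traversal (the names below are
// illustrative only and do not exist in this code base):
//
//   void iterate_discovered_list(DiscoveredList&    refs_list,
//                                BoolObjectClosure* should_drop,   // predicate
//                                OopClosure*        keep_alive,
//                                BoolObjectClosure* is_alive) {
//     DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//     while (iter.has_next()) {
//       iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic()));
//       if (should_drop->do_object_b(iter.obj())) {
//         iter.remove();                // drop from the discovered list
//         iter.make_referent_alive();   // and apply the per-element closures
//         iter.move_to_next();
//       } else {
//         iter.next();
//       }
//     }
//   }
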
// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                               iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy, from list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->klass()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->klass()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discovered_refs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

void ReferenceProcessor::set_discovered(oop ref, oop value) {
  if (_discovered_list_needs_barrier) {
    java_lang_ref_Reference::set_discovered(ref, value);
  } else {
    java_lang_ref_Reference::set_discovered_raw(ref, value);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
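//
// For illustration (a worked example, not from the original sources): with
// _max_num_q == 4, _num_q == 2 and list lengths {8, 0, 5, 3}, total_refs is
// 16 and avg_refs is 16/2 + 1 == 9; queues 2 and 3 are drained entirely into
// queues 0 and 1, after which every surviving queue holds at most avg_refs
// entries, so only queues [0, _num_q) need to be processed.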
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          set_discovered(move_tail, move_tail);
        } else {
          set_discovered(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may differ from the number used for processing,
  // so don't depend on _num_q < _max_num_q as part of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (uint i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", %u refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discovered_refs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here, e.g.:-
  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  assert(!_discovered_list_needs_barrier || UseG1GC,
         "Need to check non-G1 collector: "
         "may need a pre-write-barrier for CAS from NULL below");
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
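//
// An illustrative example (not from the original sources): during a young
// collection the span covers the young generation. A Reference object living
// in the old generation whose referent lives in the young generation is
// skipped under policy #0 (the Reference itself is outside the span) but is
// eligible for discovery under policy #1 (its referent is inside the span),
// subject to the not-already-discovered and atomicity provisos above.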
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
    assert(discovered == NULL, "control point invariant");
    assert(!_discovered_list_needs_barrier || UseG1GC,
           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
    oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    clear_discovered_references(_discovered_refs[i]);
  }
}

#endif // PRODUCT