Mon, 21 Nov 2011 07:47:34 +0100
7110718: -XX:MarkSweepAlwaysCompactCount=0 crashes the JVM
Summary: Interpret MarkSweepAlwaysCompactCount < 1 as never do full compaction
Reviewed-by: ysr, tonyp, jmasa, johnc
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  jlong now = os::javaTimeMillis();

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);
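
  // Editor's note on the two statements below: in HotSpot's macro
  // conventions, COMPILER2_PRESENT(x) expands to x only when the C2
  // (server) compiler is built in, and NOT_COMPILER2(x) expands to x
  // otherwise. A server VM therefore defaults to LRUMaxHeapPolicy and a
  // client VM to LRUCurrentHeapPolicy; both clear SoftReferences based on
  // time since last access scaled by available heap (see
  // referencePolicy.hpp).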
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!verify_disabled || !_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       int       mt_processing_degree,
                                       bool      mt_discovery,
                                       int       mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier)  :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
                                          _max_num_q * number_of_subclasses_of_ref());
  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];

  // Initialize all entries to NULL
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}
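
// Editor's sketch of the layout established above: _discovered_refs holds
// _max_num_q queues per j.l.r.Reference subclass, stored back to back:
//
//   [Soft_0 .. Soft_max-1][Weak_0 .. Weak_max-1]
//   [Final_0 .. Final_max-1][Phantom_0 .. Phantom_max-1]
//
// So list i belongs to subclass (i / _max_num_q) and worker queue
// (i % _max_num_q) - the indexing that list_name() and
// RefProcEnqueueTask::work() below rely on.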

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    warning("time warp: "INT64_FORMAT" to "INT64_FORMAT,
            _soft_ref_timestamp_clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}
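
// For illustration (editor's note, with assumed policy details): the LRU
// policies compare this master clock against each SoftReference's private
// timestamp field. Roughly, a policy such as LRUCurrentHeapPolicy keeps a
// reference while
//
//   clock - timestamp <= free_heap_in_MB * SoftRefLRUPolicyMSPerMB
//
// so with ~100 MB free and the default SoftRefLRUPolicyMSPerMB of 1000, a
// softly reachable object survives roughly 100 seconds beyond its last
// recorded access; see should_clear_reference() in referencePolicy.cpp.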

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not what the JDK 1.2 specification calls for; see #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}

template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.

  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behaviour
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next(obj, obj);
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
      }
    }
  } else { // Old behaviour
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}
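
// Editor's aside on the encoding used above: a j.l.r.Reference is "active"
// exactly when its next field is NULL. Self-looping next (new behaviour),
// or threading the pending chain through next (old behaviour), marks the
// Reference inactive before it becomes visible on the pending list that
// the JDK's ReferenceHandler thread drains.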

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }

  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // For G1 we don't want to use set_next - it
  // will dirty the card for the next field of
  // the reference object and will fail
  // CT verification.
  if (UseG1GC) {
    BarrierSet* bs = oopDesc::bs();
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);

    if (UseCompressedOops) {
      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      bs->write_ref_field_pre((oop*)next_addr, NULL);
    }
    java_lang_ref_Reference::set_next_raw(_ref, NULL);
  } else {
    java_lang_ref_Reference::set_next(_ref, NULL);
  }
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
                             "discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
                             "Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
                             "Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discovered_refs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

void ReferenceProcessor::set_discovered(oop ref, oop value) {
  if (_discovered_list_needs_barrier) {
    java_lang_ref_Reference::set_discovered(ref, value);
  } else {
    java_lang_ref_Reference::set_discovered_raw(ref, value);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1], because only the first _num_q queues,
// corresponding to the active workers, will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          set_discovered(move_tail, move_tail);
        } else {
          set_discovered(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}
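
// Editor's worked example (hypothetical numbers): with _max_num_q == 4,
// _num_q == 2 and list lengths {10, 2, 6, 2}, total_refs == 20 and
// avg_refs == 20/2 + 1 == 11. Queues 2 and 3 lie at or beyond _num_q, so
// move_all drains their 8 entries into queues 0 and 1, ending at lengths
// {11, 9, 0, 0}: every non-empty list now sits below index _num_q with at
// most avg_refs entries, which is what the active workers require.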

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may differ from the number used for processing,
  // so don't depend on _num_q < _max_num_q as part of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", %u refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discovered_refs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
                                 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
                                 " and referent: " INTPTR_FORMAT,
                                 iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here, e.g.:-
  //   _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  assert(!_discovered_list_needs_barrier || UseG1GC,
         "Need to check non-G1 collector: "
         "may need a pre-write-barrier for CAS from NULL below");
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
    assert(discovered == NULL, "control point invariant");
    assert(!_discovered_list_needs_barrier || UseG1GC,
           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
    oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  bool               should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
                             "Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  // Note: the bound is strict; an index equal to the total list count
  // would fall past the last PhantomRef queue.
  assert(i >= 0 && i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}
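
// Editor's example of the mapping above (hypothetical numbers): with
// _max_num_q == 4, index i == 9 gives j == 9 / 4 == 2, i.e. the
// "FinalRef" group, while i % _max_num_q == 1 names worker queue 1
// within that group - the inverse of the layout established in the
// constructor.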

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    clear_discovered_references(_discovered_refs[i]);
  }
}

#endif // PRODUCT