Tue, 11 May 2010 14:35:43 -0700
6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_referenceProcessor.cpp.incl"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
oop              ReferenceProcessor::_sentinelRef = NULL;
const int        subclasses_of_ref = REF_PHANTOM - REF_OTHER;
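// Note (exposition only): assuming the ReferenceType enum lays out
// REF_OTHER, REF_SOFT, REF_WEAK, REF_FINAL and REF_PHANTOM consecutively
// (see referenceType.hpp), subclasses_of_ref evaluates to 4 -- one group
// of queues for each concrete java.lang.ref.Reference subclass.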

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length() { return _len; }
  void set_length(size_t len) { _len = len; }
  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
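
// Usage sketch (exposition only): discovered lists are terminated by the
// shared _sentinelRef rather than by NULL, so a walk looks like
//   for (oop obj = list.head(); obj != ReferenceProcessor::sentinel_ref();
//        obj = java_lang_ref_Reference::discovered(obj)) { ... }
// and empty() tests head() == sentinel_ref() rather than head() == NULL.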

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion          span,
                                         bool               atomic_discovery,
                                         bool               mt_discovery,
                                         BoolObjectClosure* is_alive_non_header,
                                         int                parallel_gc_threads,
                                         bool               mt_processing,
                                         bool               dl_needs_barrier) {
  int mt_degree = 1;
  if (parallel_gc_threads > 1) {
    mt_degree = parallel_gc_threads;
  }
  ReferenceProcessor* rp =
    new ReferenceProcessor(span, atomic_discovery,
                           mt_discovery, mt_degree,
                           mt_processing && (parallel_gc_threads > 0),
                           dl_needs_barrier);
  if (rp == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
  rp->set_is_alive_non_header(is_alive_non_header);
  rp->setup_policy(false /* default soft ref policy */);
  return rp;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      atomic_discovery,
                                       bool      mt_discovery,
                                       int       mt_degree,
                                       bool      mt_processing,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(NULL),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = mt_degree;
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
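  // Layout note (exposition only): the four *Refs pointers carve the single
  // _num_q * subclasses_of_ref array into consecutive groups, so
  // _discoveredSoftRefs[i] is queue (i % _num_q) of reference type
  // (i / _num_q) -- the same index arithmetic used by list_name() below.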
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written. See #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}

template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}
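
// Note (exposition only): the helper above is templated so that reading and
// comparing the pending-list head uses the correct width -- it is
// instantiated with narrowOop when UseCompressedOops is set and with oop
// otherwise, as dispatched by enqueue_discovered_references() below.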

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}
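
// Before/after sketch (exposition only): given a discovered chain
//   A --discovered--> B --discovered--> sentinel
// and an existing pending list headed by P, the walk above yields
//   A --next--> B --next--> P ...
// (B points next at itself if the pending list was empty), with all
// discovered fields cleared so the References no longer look discovered.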

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};
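
// Partitioning sketch (exposition only): with _n_queues == 4, worker 1
// processes list indices 1, 5, 9 and 13 -- queue 1 of each of the four
// reference types -- so no two workers touch the same discovered list.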

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord*                    pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )
};
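
// Canonical traversal sketch (exposition only), as used by the phase
// methods further below. Note that after remove() the cursor advances
// with move_to_next() rather than next(), so that _prev_next keeps
// pointing at the predecessor's slot:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (/* this entry should be dropped */) {
//       iter.remove();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }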

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
        " discovered Refs by policy ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to keep sentinel pointer around
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
                             list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase1(_refs_lists[i], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  for (int i = 0; i < _num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _num_q; from_idx++) {
    while (ref_lists[from_idx].length() > avg_refs) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move =
          MIN2(ref_lists[from_idx].length() - avg_refs,
               avg_refs - ref_lists[to_idx].length());
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].dec_length(refs_to_move);
      } else {
        ++to_idx;
      }
    }
  }
}
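
// Worked example (exposition only): with _num_q == 2 and lengths {10, 0},
// avg_refs = 10/2 + 1 = 6; to_idx skips list 0 (already >= avg), then
// refs_to_move = MIN2(10 - 6, 6 - 0) = 4 refs are spliced from list 0 onto
// list 1, leaving lengths {6, 4}. The "+ 1" in avg_refs keeps the loop from
// endlessly shuttling entries around an exact average.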

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt = task_executor != NULL && _processing_is_mt;
  if (mt && ParallelRefProcBalancingEnabled) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed " SIZE_FORMAT " Refs with NULL referents out of " SIZE_FORMAT
        " discovered Refs", iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    assert(thr->is_GC_task_thread(),
           "Dubious cast from Thread* to WorkerThread*?");
    id = ((WorkerThread*)thr)->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  return list;
}
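
// Example (exposition only): under MT discovery, a GC worker with
// WorkerThread id 3 discovering a WeakReference is handed
// &_discoveredWeakRefs[3]; under single-threaded discovery with MT
// processing, lists are instead filled round-robin via next_id().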

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    if (UseCompressedOops) {
      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
    } else {
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    guarantee(false, "Need to check non-G1 collector");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    oop referent = java_lang_ref_Reference::referent(obj);
    // In the case of non-concurrent discovery, the last
    // disjunct below should hold. It may not hold in the
    // case of concurrent discovery because mutators may
    // concurrently clear() a Reference.
    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
           "Refs with null referents already filtered");
    if (is_alive_non_header()->do_object_b(referent)) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC,
             "Only possible with an incremental-update concurrent collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    oop referent = java_lang_ref_Reference::referent(obj);
    assert(referent->is_oop(), "bad referent");
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() && _span.contains(referent))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);
  }

  // In the MT discovery case, it is currently possible to see
  // the following message multiple times if several threads
  // discover a reference about the same time. Only one will
  // however have actually added it to the discovered queue.
  // One could let add_to_discovered_list_mt() return an
  // indication for success in queueing (by 1 thread) or
  // failure (by all other threads), but I decided the extra
  // code was not worth the effort for something that is
  // only used for debugging support.
  if (TraceReferenceGC) {
    oop referent = java_lang_ref_Reference::referent(obj);
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  bool should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
        " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i <= _num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}
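
// Example (exposition only): with _num_q == 4, index 5 gives
// j = 5 / 4 = 1, i.e. "WeakRef" -- consistent with the group layout
// (soft, weak, final, phantom) established in the constructor.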

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT