src/share/vm/memory/referenceProcessor.hpp

author       johnc
date         Wed, 12 Oct 2011 10:25:51 -0700
changeset    3188   d1bdeef3e3e2
parent       3175   4dfb2df418f2
child        3210   bf2d2b8b1726
permissions  -rw-r--r--

7098282: G1: assert(interval >= 0) failed: Sanity check, referencePolicy.cpp: 76
Summary: There is a race between one thread successfully forwarding and copying the klass mirror for the SoftReference class (including the static master clock) and another thread attempting to use the master clock while discovering a soft reference object. Maintain a shadow copy of the soft reference master clock and use the shadow during reference discovery and reference processing.
Reviewed-by: tonyp, brutisso, ysr

duke@435 1 /*
ysr@2651 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
stefank@2314 26 #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
stefank@2314 27
stefank@2314 28 #include "memory/referencePolicy.hpp"
stefank@2314 29 #include "oops/instanceRefKlass.hpp"
stefank@2314 30
duke@435 31 // ReferenceProcessor class encapsulates the per-"collector" processing
ysr@888 32 // of java.lang.Reference objects for GC. The interface is useful for supporting
duke@435 33 // a generational abstraction, in particular when there are multiple
duke@435 34 // generations that are being independently collected -- possibly
duke@435 35 // concurrently and/or incrementally. Note, however, that the
duke@435 36 // ReferenceProcessor class abstracts away from a generational setting
duke@435 37 // by using only a heap interval (called "span" below), thus allowing
duke@435 38 // its use in a straightforward manner in a general, non-generational
duke@435 39 // setting.
duke@435 40 //
duke@435 41 // The basic idea is that each ReferenceProcessor object concerns
duke@435 42 // itself with ("weak") reference processing in a specific "span"
duke@435 43 // of the heap of interest to a specific collector. Currently,
duke@435 44 // the span is a convex interval of the heap, but, efficiency
duke@435 45 // apart, there seems to be no reason it couldn't be extended
duke@435 46 // (with appropriate modifications) to any "non-convex interval".
duke@435 47
duke@435 48 // forward references
duke@435 49 class ReferencePolicy;
duke@435 50 class AbstractRefProcTaskExecutor;
johnc@3175 51
johnc@3175 52 // List of discovered references.
johnc@3175 53 class DiscoveredList {
johnc@3175 54 public:
johnc@3175 55 DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
johnc@3175 56 oop head() const {
johnc@3175 57 return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
johnc@3175 58 _oop_head;
johnc@3175 59 }
johnc@3175 60 HeapWord* adr_head() {
johnc@3175 61 return UseCompressedOops ? (HeapWord*)&_compressed_head :
johnc@3175 62 (HeapWord*)&_oop_head;
johnc@3175 63 }
johnc@3175 64 void set_head(oop o) {
johnc@3175 65 if (UseCompressedOops) {
johnc@3175 66 // Must compress the head ptr.
johnc@3175 67 _compressed_head = oopDesc::encode_heap_oop(o);
johnc@3175 68 } else {
johnc@3175 69 _oop_head = o;
johnc@3175 70 }
johnc@3175 71 }
johnc@3175 72 bool is_empty() const { return head() == NULL; }
johnc@3175 73 size_t length() { return _len; }
johnc@3175 74 void set_length(size_t len) { _len = len; }
johnc@3175 75 void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
johnc@3175 76 void dec_length(size_t dec) { _len -= dec; }
johnc@3175 77 private:
johnc@3175 78 // Set value depending on UseCompressedOops. This could be a template class
johnc@3175 79 // but then we would have to fix all the instantiations and declarations that use this class.
johnc@3175 80 oop _oop_head;
johnc@3175 81 narrowOop _compressed_head;
johnc@3175 82 size_t _len;
johnc@3175 83 };
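//
// Example (illustrative sketch only): pushing a newly discovered Reference
// onto a DiscoveredList, mirroring the single-threaded path of
// ReferenceProcessor::discover_reference(); refs_list, obj and rt are
// placeholders.
//
//   DiscoveredList& refs_list = *get_discovered_list(rt);
//   oop obj = ...;                        // the discovered Reference object
//   // Chain obj in front of the current head, then update head and length.
//   // set_head() stores a compressed oop iff UseCompressedOops.
//   set_discovered(obj, refs_list.head());
//   refs_list.set_head(obj);
//   refs_list.inc_length(1);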
johnc@3175 84
johnc@3175 85 // Iterator for the list of discovered references.
johnc@3175 86 class DiscoveredListIterator {
johnc@3175 87 private:
johnc@3175 88 DiscoveredList& _refs_list;
johnc@3175 89 HeapWord* _prev_next;
johnc@3175 90 oop _prev;
johnc@3175 91 oop _ref;
johnc@3175 92 HeapWord* _discovered_addr;
johnc@3175 93 oop _next;
johnc@3175 94 HeapWord* _referent_addr;
johnc@3175 95 oop _referent;
johnc@3175 96 OopClosure* _keep_alive;
johnc@3175 97 BoolObjectClosure* _is_alive;
johnc@3175 98
johnc@3175 99 DEBUG_ONLY(
johnc@3175 100 oop _first_seen; // cyclic linked list check
johnc@3175 101 )
johnc@3175 102
johnc@3175 103 NOT_PRODUCT(
johnc@3175 104 size_t _processed;
johnc@3175 105 size_t _removed;
johnc@3175 106 )
johnc@3175 107
johnc@3175 108 public:
johnc@3175 109 inline DiscoveredListIterator(DiscoveredList& refs_list,
johnc@3175 110 OopClosure* keep_alive,
johnc@3175 111 BoolObjectClosure* is_alive):
johnc@3175 112 _refs_list(refs_list),
johnc@3175 113 _prev_next(refs_list.adr_head()),
johnc@3175 114 _prev(NULL),
johnc@3175 115 _ref(refs_list.head()),
johnc@3175 116 #ifdef ASSERT
johnc@3175 117 _first_seen(refs_list.head()),
johnc@3175 118 #endif
johnc@3175 119 #ifndef PRODUCT
johnc@3175 120 _processed(0),
johnc@3175 121 _removed(0),
johnc@3175 122 #endif
johnc@3175 123 _next(NULL),
johnc@3175 124 _keep_alive(keep_alive),
johnc@3175 125 _is_alive(is_alive)
johnc@3175 126 { }
johnc@3175 127
johnc@3175 128 // Returns true while there are more references on the list (i.e. not yet at end-of-list).
johnc@3175 129 inline bool has_next() const { return _ref != NULL; }
johnc@3175 130
johnc@3175 131 // Get oop to the Reference object.
johnc@3175 132 inline oop obj() const { return _ref; }
johnc@3175 133
johnc@3175 134 // Get oop to the referent object.
johnc@3175 135 inline oop referent() const { return _referent; }
johnc@3175 136
johnc@3175 137 // Returns true if referent is alive.
johnc@3175 138 inline bool is_referent_alive() const {
johnc@3175 139 return _is_alive->do_object_b(_referent);
johnc@3175 140 }
johnc@3175 141
johnc@3175 142 // Loads data for the current reference.
johnc@3175 143 // The "allow_null_referent" argument tells us to allow for the possibility
johnc@3175 144 // of a NULL referent in the discovered Reference object. This typically
johnc@3175 145 // happens in the case of concurrent collectors that may have done the
johnc@3175 146 // discovery concurrently, or interleaved, with mutator execution.
johnc@3175 147 void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
johnc@3175 148
johnc@3175 149 // Move to the next discovered reference.
johnc@3175 150 inline void next() {
johnc@3175 151 _prev_next = _discovered_addr;
johnc@3175 152 _prev = _ref;
johnc@3175 153 move_to_next();
johnc@3175 154 }
johnc@3175 155
johnc@3175 156 // Remove the current reference from the list
johnc@3175 157 void remove();
johnc@3175 158
johnc@3175 159 // Make the Reference object active again.
johnc@3175 160 void make_active();
johnc@3175 161
johnc@3175 162 // Make the referent alive.
johnc@3175 163 inline void make_referent_alive() {
johnc@3175 164 if (UseCompressedOops) {
johnc@3175 165 _keep_alive->do_oop((narrowOop*)_referent_addr);
johnc@3175 166 } else {
johnc@3175 167 _keep_alive->do_oop((oop*)_referent_addr);
johnc@3175 168 }
johnc@3175 169 }
johnc@3175 170
johnc@3175 171 // Update the discovered field.
johnc@3175 172 inline void update_discovered() {
johnc@3175 173 // First _prev_next ref actually points into DiscoveredList (gross).
johnc@3175 174 if (UseCompressedOops) {
johnc@3175 175 if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
johnc@3175 176 _keep_alive->do_oop((narrowOop*)_prev_next);
johnc@3175 177 }
johnc@3175 178 } else {
johnc@3175 179 if (!oopDesc::is_null(*(oop*)_prev_next)) {
johnc@3175 180 _keep_alive->do_oop((oop*)_prev_next);
johnc@3175 181 }
johnc@3175 182 }
johnc@3175 183 }
johnc@3175 184
johnc@3175 185 // NULL out referent pointer.
johnc@3175 186 void clear_referent();
johnc@3175 187
johnc@3175 188 // Statistics
johnc@3175 189 NOT_PRODUCT(
johnc@3175 190 inline size_t processed() const { return _processed; }
johnc@3175 191 inline size_t removed() const { return _removed; }
johnc@3175 192 )
johnc@3175 193
johnc@3175 194 inline void move_to_next() {
johnc@3175 195 if (_ref == _next) {
johnc@3175 196 // End of the list.
johnc@3175 197 _ref = NULL;
johnc@3175 198 } else {
johnc@3175 199 _ref = _next;
johnc@3175 200 }
johnc@3175 201 assert(_ref != _first_seen, "cyclic ref_list found");
johnc@3175 202 NOT_PRODUCT(_processed++);
johnc@3175 203 }
johnc@3175 204 };
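//
// Example (sketch of the canonical traversal used by the processing phases
// in referenceProcessor.cpp; refs_list, keep_alive and is_alive are
// placeholders):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is still reachable: this Reference will not be
//       // enqueued, so unlink it from the list and keep its referent alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }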
duke@435 205
duke@435 206 class ReferenceProcessor : public CHeapObj {
duke@435 207 protected:
ysr@3117 208 // Compatibility with pre-4965777 JDKs
ysr@3117 209 static bool _pending_list_uses_discovered_field;
johnc@3175 210
johnc@3188 211 // Shadow copy of the SoftReference master timestamp clock (see 7098282)
johnc@3188 212 static jlong _soft_ref_timestamp_clock;
johnc@3188 213
johnc@3175 214 MemRegion _span; // (right-open) interval of heap
johnc@3175 215 // subject to wkref discovery
johnc@3175 216
johnc@3175 217 bool _discovering_refs; // true when discovery enabled
johnc@3175 218 bool _discovery_is_atomic; // if discovery is atomic wrt
johnc@3175 219 // other collectors in configuration
johnc@3175 220 bool _discovery_is_mt; // true if reference discovery is MT.
johnc@3175 221
ysr@777 222 // If true, setting "next" field of a discovered refs list requires
ysr@777 223 // write barrier(s). (Must be true if used in a collector in which
ysr@777 224 // elements of a discovered list may be moved during discovery: for
ysr@777 225 // example, a collector like Garbage-First that moves objects during a
ysr@777 226 // long-term concurrent marking phase that does weak reference
ysr@777 227 // discovery.)
ysr@777 228 bool _discovered_list_needs_barrier;
duke@435 229
johnc@3175 230 BarrierSet* _bs; // Cached copy of BarrierSet.
johnc@3175 231 bool _enqueuing_is_done; // true if all weak references enqueued
johnc@3175 232 bool _processing_is_mt; // true during phases when
johnc@3175 233 // reference processing is MT.
johnc@3175 234 int _next_id; // round-robin mod _num_q counter in
johnc@3175 235 // support of work distribution
johnc@3175 236
johnc@3175 237 // For collectors that do not keep GC liveness information
duke@435 238 // in the object header, this field holds a closure that
duke@435 239 // helps the reference processor determine the reachability
johnc@3175 240 // of an oop. It is currently initialized to NULL for all
johnc@3175 241 // collectors except for CMS and G1.
duke@435 242 BoolObjectClosure* _is_alive_non_header;
duke@435 243
ysr@888 244 // Soft ref clearing policies
ysr@888 245 // . the default policy
ysr@888 246 static ReferencePolicy* _default_soft_ref_policy;
ysr@888 247 // . the "clear all" policy
ysr@888 248 static ReferencePolicy* _always_clear_soft_ref_policy;
ysr@888 249 // . the current policy below is either one of the above
ysr@888 250 ReferencePolicy* _current_soft_ref_policy;
ysr@888 251
duke@435 252 // The discovered ref lists themselves
coleenp@548 253
jmasa@2188 254 // The active MT'ness degree of the queues below
coleenp@548 255 int _num_q;
jmasa@2188 256 // The maximum MT'ness degree of the queues below
jmasa@2188 257 int _max_num_q;
coleenp@548 258 // Arrays of lists of oops, one per thread
coleenp@548 259 DiscoveredList* _discoveredSoftRefs;
duke@435 260 DiscoveredList* _discoveredWeakRefs;
duke@435 261 DiscoveredList* _discoveredFinalRefs;
duke@435 262 DiscoveredList* _discoveredPhantomRefs;
duke@435 263
duke@435 264 public:
johnc@3175 265 static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
johnc@3175 266
johnc@3175 267 int num_q() { return _num_q; }
johnc@3175 268 int max_num_q() { return _max_num_q; }
johnc@3175 269 void set_active_mt_degree(int v) { _num_q = v; }
johnc@3175 270 DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
johnc@3175 271
ysr@892 272 ReferencePolicy* setup_policy(bool always_clear) {
ysr@888 273 _current_soft_ref_policy = always_clear ?
ysr@888 274 _always_clear_soft_ref_policy : _default_soft_ref_policy;
ysr@892 275 _current_soft_ref_policy->setup(); // snapshot the policy threshold
ysr@888 276 return _current_soft_ref_policy;
ysr@888 277 }
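//
// Example (illustrative): a collector snapshots the soft ref clearing
// policy once, at the start of a collection, before any processing:
//
//   rp->setup_policy(clear_all_soft_refs);  // snapshot policy + threshold
//   ...
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc, NULL);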
duke@435 278
duke@435 279 // Process references with a certain reachability level.
duke@435 280 void process_discovered_reflist(DiscoveredList refs_lists[],
duke@435 281 ReferencePolicy* policy,
duke@435 282 bool clear_referent,
duke@435 283 BoolObjectClosure* is_alive,
duke@435 284 OopClosure* keep_alive,
duke@435 285 VoidClosure* complete_gc,
duke@435 286 AbstractRefProcTaskExecutor* task_executor);
duke@435 287
duke@435 288 void process_phaseJNI(BoolObjectClosure* is_alive,
duke@435 289 OopClosure* keep_alive,
duke@435 290 VoidClosure* complete_gc);
duke@435 291
duke@435 292 // Work methods used by the method process_discovered_reflist
duke@435 293 // Phase1: keep alive all those referents that are otherwise
duke@435 294 // dead but which must be kept alive by policy (and their closure).
coleenp@548 295 void process_phase1(DiscoveredList& refs_list,
duke@435 296 ReferencePolicy* policy,
duke@435 297 BoolObjectClosure* is_alive,
duke@435 298 OopClosure* keep_alive,
duke@435 299 VoidClosure* complete_gc);
duke@435 300 // Phase2: remove all those references whose referents are
duke@435 301 // reachable.
coleenp@548 302 inline void process_phase2(DiscoveredList& refs_list,
duke@435 303 BoolObjectClosure* is_alive,
duke@435 304 OopClosure* keep_alive,
duke@435 305 VoidClosure* complete_gc) {
duke@435 306 if (discovery_is_atomic()) {
duke@435 307 // complete_gc is ignored in this case for this phase
coleenp@548 308 pp2_work(refs_list, is_alive, keep_alive);
duke@435 309 } else {
duke@435 310 assert(complete_gc != NULL, "Error");
coleenp@548 311 pp2_work_concurrent_discovery(refs_list, is_alive,
duke@435 312 keep_alive, complete_gc);
duke@435 313 }
duke@435 314 }
duke@435 315 // Work methods in support of process_phase2
coleenp@548 316 void pp2_work(DiscoveredList& refs_list,
duke@435 317 BoolObjectClosure* is_alive,
duke@435 318 OopClosure* keep_alive);
duke@435 319 void pp2_work_concurrent_discovery(
coleenp@548 320 DiscoveredList& refs_list,
duke@435 321 BoolObjectClosure* is_alive,
duke@435 322 OopClosure* keep_alive,
duke@435 323 VoidClosure* complete_gc);
duke@435 324 // Phase3: process the referents by either clearing them
duke@435 325 // or keeping them alive (and their closure)
coleenp@548 326 void process_phase3(DiscoveredList& refs_list,
duke@435 327 bool clear_referent,
duke@435 328 BoolObjectClosure* is_alive,
duke@435 329 OopClosure* keep_alive,
duke@435 330 VoidClosure* complete_gc);
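//
// Taken together, process_discovered_reflist applies the phases to each
// list roughly as follows (sketch; only the SoftReference lists are
// handed a non-NULL policy):
//
//   if (policy != NULL) {
//     process_phase1(refs_list, policy, is_alive, keep_alive, complete_gc);
//   }
//   process_phase2(refs_list, is_alive, keep_alive, complete_gc);
//   process_phase3(refs_list, clear_referent, is_alive, keep_alive,
//                  complete_gc);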
duke@435 331
duke@435 332 // Enqueue references with a certain reachability level
coleenp@548 333 void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
duke@435 334
duke@435 335 // "Preclean" all the discovered reference lists
duke@435 336 // by removing references with strongly reachable referents.
duke@435 337 // The first argument is a predicate on an oop that indicates
duke@435 338 // its (strong) reachability and the second is a closure that
duke@435 339 // may be used to incrementalize or abort the precleaning process.
duke@435 340 // The caller is responsible for taking care of potential
duke@435 341 // interference with concurrent operations on these lists
duke@435 342 // (or predicates involved) by other threads. Currently
jmasa@1625 343 // only used by the CMS collector; should_unload_classes is
jmasa@1625 344 // used to aid assertion checking when classes are collected.
duke@435 345 void preclean_discovered_references(BoolObjectClosure* is_alive,
duke@435 346 OopClosure* keep_alive,
duke@435 347 VoidClosure* complete_gc,
jmasa@1625 348 YieldClosure* yield,
jmasa@1625 349 bool should_unload_classes);
duke@435 350
duke@435 351 // Delete entries in the discovered lists that have
duke@435 352 // either a null referent or are not active. Such
duke@435 353 // Reference objects can result from the clearing
duke@435 354 // or enqueueing of Reference objects concurrent
duke@435 355 // with their discovery by a (concurrent) collector.
duke@435 356 // For a definition of "active" see java.lang.ref.Reference;
duke@435 357 // Refs are born active, become inactive when enqueued,
duke@435 358 // and never become active again. The state of being
duke@435 359 // active is encoded as follows: A Ref is active
duke@435 360 // if and only if its "next" field is NULL.
duke@435 361 void clean_up_discovered_references();
duke@435 362 void clean_up_discovered_reflist(DiscoveredList& refs_list);
duke@435 363
duke@435 364 // Returns the name of the discovered reference list
duke@435 365 // occupying the i / _num_q slot.
duke@435 366 const char* list_name(int i);
duke@435 367
coleenp@548 368 void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
coleenp@548 369
duke@435 370 protected:
johnc@3175 371 // Set the 'discovered' field of the given reference to
johnc@3175 372 // the given value - emitting barriers depending upon
johnc@3175 373 // the value of _discovered_list_needs_barrier.
johnc@3175 374 void set_discovered(oop ref, oop value);
johnc@3175 375
duke@435 376 // "Preclean" the given discovered reference list
duke@435 377 // by removing references with strongly reachable referents.
duke@435 378 // Currently used in support of CMS only.
duke@435 379 void preclean_discovered_reflist(DiscoveredList& refs_list,
duke@435 380 BoolObjectClosure* is_alive,
duke@435 381 OopClosure* keep_alive,
duke@435 382 VoidClosure* complete_gc,
duke@435 383 YieldClosure* yield);
duke@435 384
ysr@2651 385 // round-robin mod _num_q (note: _not_ mod _max_num_q)
duke@435 386 int next_id() {
duke@435 387 int id = _next_id;
duke@435 388 if (++_next_id == _num_q) {
duke@435 389 _next_id = 0;
duke@435 390 }
duke@435 391 return id;
duke@435 392 }
duke@435 393 DiscoveredList* get_discovered_list(ReferenceType rt);
duke@435 394 inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
coleenp@548 395 HeapWord* discovered_addr);
duke@435 396 void verify_ok_to_handle_reflists() PRODUCT_RETURN;
duke@435 397
stefank@3115 398 void clear_discovered_references(DiscoveredList& refs_list);
duke@435 399 void abandon_partial_discovered_list(DiscoveredList& refs_list);
duke@435 400
duke@435 401 // Calculate the number of jni handles.
duke@435 402 unsigned int count_jni_refs();
duke@435 403
duke@435 404 // Balances reference queues.
duke@435 405 void balance_queues(DiscoveredList ref_lists[]);
duke@435 406
duke@435 407 // Update (advance) the soft ref master clock field.
duke@435 408 void update_soft_ref_master_clock();
duke@435 409
duke@435 410 public:
duke@435 411 // constructor
duke@435 412 ReferenceProcessor():
duke@435 413 _span((HeapWord*)NULL, (HeapWord*)NULL),
duke@435 414 _discoveredSoftRefs(NULL), _discoveredWeakRefs(NULL),
duke@435 415 _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
duke@435 416 _discovering_refs(false),
duke@435 417 _discovery_is_atomic(true),
duke@435 418 _enqueuing_is_done(false),
duke@435 419 _discovery_is_mt(false),
ysr@777 420 _discovered_list_needs_barrier(false),
ysr@777 421 _bs(NULL),
duke@435 422 _is_alive_non_header(NULL),
duke@435 423 _num_q(0),
jmasa@2188 424 _max_num_q(0),
duke@435 425 _processing_is_mt(false),
duke@435 426 _next_id(0)
ysr@2651 427 { }
duke@435 428
ysr@2651 429 // Default parameters give you a vanilla reference processor.
ysr@2651 430 ReferenceProcessor(MemRegion span,
ysr@2651 431 bool mt_processing = false, int mt_processing_degree = 1,
ysr@2651 432 bool mt_discovery = false, int mt_discovery_degree = 1,
ysr@2651 433 bool atomic_discovery = true,
ysr@2651 434 BoolObjectClosure* is_alive_non_header = NULL,
ysr@777 435 bool discovered_list_needs_barrier = false);
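//
// Example (illustrative; the exact arguments vary per collector, and the
// flag combinations shown are typical rather than prescribed):
//
//   _ref_processor =
//     new ReferenceProcessor(_reserved,                // span
//                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
//                            (int) ParallelGCThreads,  // mt processing degree
//                            ParallelGCThreads > 1,    // mt discovery
//                            (int) ParallelGCThreads,  // mt discovery degree
//                            true,                     // atomic discovery
//                            NULL,                     // is_alive_non_header
//                            false);                   // no discovered list barrier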
duke@435 436
duke@435 437 // RefDiscoveryPolicy values
johnc@1679 438 enum DiscoveryPolicy {
duke@435 439 ReferenceBasedDiscovery = 0,
johnc@1679 440 ReferentBasedDiscovery = 1,
johnc@1679 441 DiscoveryPolicyMin = ReferenceBasedDiscovery,
johnc@1679 442 DiscoveryPolicyMax = ReferentBasedDiscovery
duke@435 443 };
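//
// Roughly speaking, ReferenceBasedDiscovery discovers a Reference when the
// Reference object itself lies in the span being collected, while
// ReferentBasedDiscovery decides based on where the referent lies; the
// policy in force is selected by the RefDiscoveryPolicy command-line flag.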
duke@435 444
duke@435 445 static void init_statics();
duke@435 446
duke@435 447 public:
duke@435 448 // get and set "is_alive_non_header" field
duke@435 449 BoolObjectClosure* is_alive_non_header() {
duke@435 450 return _is_alive_non_header;
duke@435 451 }
duke@435 452 void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
duke@435 453 _is_alive_non_header = is_alive_non_header;
duke@435 454 }
duke@435 455
duke@435 456 // get and set span
duke@435 457 MemRegion span() { return _span; }
duke@435 458 void set_span(MemRegion span) { _span = span; }
duke@435 459
duke@435 460 // start and stop weak ref discovery
johnc@3188 461 void enable_discovery(bool verify_disabled, bool check_no_refs);
duke@435 462 void disable_discovery() { _discovering_refs = false; }
duke@435 463 bool discovery_enabled() { return _discovering_refs; }
duke@435 464
duke@435 465 // whether discovery is atomic wrt other collectors
duke@435 466 bool discovery_is_atomic() const { return _discovery_is_atomic; }
duke@435 467 void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
duke@435 468
ysr@3117 469 // whether the JDK in which we are embedded is a pre-4965777 JDK,
ysr@3117 470 // and thus whether or not it uses the discovered field to chain
ysr@3117 471 // the entries in the pending list.
ysr@3117 472 static bool pending_list_uses_discovered_field() {
ysr@3117 473 return _pending_list_uses_discovered_field;
ysr@3117 474 }
ysr@3117 475
duke@435 476 // whether discovery is done by multiple threads simultaneously
duke@435 477 bool discovery_is_mt() const { return _discovery_is_mt; }
duke@435 478 void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
duke@435 479
duke@435 480 // Whether we are in a phase when _processing_ is MT.
duke@435 481 bool processing_is_mt() const { return _processing_is_mt; }
duke@435 482 void set_mt_processing(bool mt) { _processing_is_mt = mt; }
duke@435 483
duke@435 484 // whether all enqueuing of weak references is complete
duke@435 485 bool enqueuing_is_done() { return _enqueuing_is_done; }
duke@435 486 void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
duke@435 487
duke@435 488 // iterate over oops
duke@435 489 void weak_oops_do(OopClosure* f); // weak roots
duke@435 490
jmasa@2188 491 // Balance each of the discovered lists.
jmasa@2188 492 void balance_all_queues();
jmasa@2188 493
duke@435 494 // Discover a Reference object, using appropriate discovery criteria
duke@435 495 bool discover_reference(oop obj, ReferenceType rt);
duke@435 496
duke@435 497 // Process references found during GC (called by the garbage collector)
ysr@888 498 void process_discovered_references(BoolObjectClosure* is_alive,
duke@435 499 OopClosure* keep_alive,
duke@435 500 VoidClosure* complete_gc,
duke@435 501 AbstractRefProcTaskExecutor* task_executor);
duke@435 502
duke@435 503 public:
duke@435 504 // Enqueue references at end of GC (called by the garbage collector)
duke@435 505 bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
duke@435 506
ysr@777 507 // If a discovery is in process that is being superseded, abandon it: all
ysr@777 508 // the discovered lists will be empty, and all the objects on them will
ysr@777 509 // have NULL discovered fields. Must be called only at a safepoint.
ysr@777 510 void abandon_partial_discovery();
ysr@777 511
duke@435 512 // debugging
duke@435 513 void verify_no_references_recorded() PRODUCT_RETURN;
ysr@2337 514 void verify_referent(oop obj) PRODUCT_RETURN;
duke@435 515
duke@435 516 // clear the discovered lists (unlinking each entry).
duke@435 517 void clear_discovered_references() PRODUCT_RETURN;
duke@435 518 };
duke@435 519
duke@435 520 // A utility class to disable reference discovery in
duke@435 521 // the scope which contains it, for given ReferenceProcessor.
duke@435 522 class NoRefDiscovery: StackObj {
duke@435 523 private:
duke@435 524 ReferenceProcessor* _rp;
duke@435 525 bool _was_discovering_refs;
duke@435 526 public:
duke@435 527 NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
twisti@2144 528 _was_discovering_refs = _rp->discovery_enabled();
twisti@2144 529 if (_was_discovering_refs) {
duke@435 530 _rp->disable_discovery();
duke@435 531 }
duke@435 532 }
duke@435 533
duke@435 534 ~NoRefDiscovery() {
duke@435 535 if (_was_discovering_refs) {
johnc@3175 536 _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
duke@435 537 }
duke@435 538 }
duke@435 539 };
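//
// Example (illustrative):
//
//   {
//     NoRefDiscovery no_discovery(ref_processor);
//     ...  // work that must not discover references
//   }      // prior discovery state restored here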
duke@435 540
duke@435 541
duke@435 542 // A utility class to temporarily mutate the span of the
duke@435 543 // given ReferenceProcessor in the scope that contains it.
duke@435 544 class ReferenceProcessorSpanMutator: StackObj {
duke@435 545 private:
duke@435 546 ReferenceProcessor* _rp;
duke@435 547 MemRegion _saved_span;
duke@435 548
duke@435 549 public:
duke@435 550 ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
duke@435 551 MemRegion span):
duke@435 552 _rp(rp) {
duke@435 553 _saved_span = _rp->span();
duke@435 554 _rp->set_span(span);
duke@435 555 }
duke@435 556
duke@435 557 ~ReferenceProcessorSpanMutator() {
duke@435 558 _rp->set_span(_saved_span);
duke@435 559 }
duke@435 560 };
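//
// Example (illustrative): confine discovery to one generation's span for
// the duration of its collection (gen is a placeholder):
//
//   {
//     ReferenceProcessorSpanMutator rp_mut(ref_processor, gen->reserved());
//     ...  // discovery and processing now see only gen's span
//   }      // original span restored here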
duke@435 561
duke@435 562 // A utility class to temporarily change the MT'ness of
duke@435 563 // reference discovery for the given ReferenceProcessor
duke@435 564 // in the scope that contains it.
ysr@2651 565 class ReferenceProcessorMTDiscoveryMutator: StackObj {
duke@435 566 private:
duke@435 567 ReferenceProcessor* _rp;
duke@435 568 bool _saved_mt;
duke@435 569
duke@435 570 public:
ysr@2651 571 ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
ysr@2651 572 bool mt):
duke@435 573 _rp(rp) {
duke@435 574 _saved_mt = _rp->discovery_is_mt();
duke@435 575 _rp->set_mt_discovery(mt);
duke@435 576 }
duke@435 577
ysr@2651 578 ~ReferenceProcessorMTDiscoveryMutator() {
duke@435 579 _rp->set_mt_discovery(_saved_mt);
duke@435 580 }
duke@435 581 };
duke@435 582
duke@435 583
duke@435 584 // A utility class to temporarily change the disposition
duke@435 585 // of the "is_alive_non_header" closure field of the
duke@435 586 // given ReferenceProcessor in the scope that contains it.
duke@435 587 class ReferenceProcessorIsAliveMutator: StackObj {
duke@435 588 private:
duke@435 589 ReferenceProcessor* _rp;
duke@435 590 BoolObjectClosure* _saved_cl;
duke@435 591
duke@435 592 public:
duke@435 593 ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
duke@435 594 BoolObjectClosure* cl):
duke@435 595 _rp(rp) {
duke@435 596 _saved_cl = _rp->is_alive_non_header();
duke@435 597 _rp->set_is_alive_non_header(cl);
duke@435 598 }
duke@435 599
duke@435 600 ~ReferenceProcessorIsAliveMutator() {
duke@435 601 _rp->set_is_alive_non_header(_saved_cl);
duke@435 602 }
duke@435 603 };
duke@435 604
duke@435 605 // A utility class to temporarily change the disposition
duke@435 606 // of the "discovery_is_atomic" field of the
duke@435 607 // given ReferenceProcessor in the scope that contains it.
duke@435 608 class ReferenceProcessorAtomicMutator: StackObj {
duke@435 609 private:
duke@435 610 ReferenceProcessor* _rp;
duke@435 611 bool _saved_atomic_discovery;
duke@435 612
duke@435 613 public:
duke@435 614 ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
duke@435 615 bool atomic):
duke@435 616 _rp(rp) {
duke@435 617 _saved_atomic_discovery = _rp->discovery_is_atomic();
duke@435 618 _rp->set_atomic_discovery(atomic);
duke@435 619 }
duke@435 620
duke@435 621 ~ReferenceProcessorAtomicMutator() {
duke@435 622 _rp->set_atomic_discovery(_saved_atomic_discovery);
duke@435 623 }
duke@435 624 };
duke@435 625
duke@435 626
duke@435 627 // A utility class to temporarily change the MT processing
duke@435 628 // disposition of the given ReferenceProcessor instance
duke@435 629 // in the scope that contains it.
duke@435 630 class ReferenceProcessorMTProcMutator: StackObj {
duke@435 631 private:
duke@435 632 ReferenceProcessor* _rp;
duke@435 633 bool _saved_mt;
duke@435 634
duke@435 635 public:
duke@435 636 ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
duke@435 637 bool mt):
duke@435 638 _rp(rp) {
duke@435 639 _saved_mt = _rp->processing_is_mt();
duke@435 640 _rp->set_mt_processing(mt);
duke@435 641 }
duke@435 642
duke@435 643 ~ReferenceProcessorMTProcMutator() {
duke@435 644 _rp->set_mt_processing(_saved_mt);
duke@435 645 }
duke@435 646 };
duke@435 647
duke@435 648
duke@435 649 // This class is an interface used to implement task execution for the
duke@435 650 // reference processing.
duke@435 651 class AbstractRefProcTaskExecutor {
duke@435 652 public:
duke@435 653
duke@435 654 // Abstract tasks to execute.
duke@435 655 class ProcessTask;
duke@435 656 class EnqueueTask;
duke@435 657
duke@435 658 // Executes a task using worker threads.
duke@435 659 virtual void execute(ProcessTask& task) = 0;
duke@435 660 virtual void execute(EnqueueTask& task) = 0;
duke@435 661
duke@435 662 // Switch to single threaded mode.
duke@435 663 virtual void set_single_threaded_mode() { }
duke@435 664 };
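//
// Example (sketch): a parallel collector supplies an executor that fans
// each task out to its worker threads. MyRefProcTaskExecutor and the
// per-worker closures are placeholders, not HotSpot names:
//
//   class MyRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//   public:
//     virtual void execute(ProcessTask& task) {
//       // for each worker id i:
//       //   task.work(i, *is_alive(i), *keep_alive(i), *complete_gc(i));
//     }
//     virtual void execute(EnqueueTask& task) {
//       // for each worker id i: task.work(i);
//     }
//   };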
duke@435 665
duke@435 666 // Abstract reference processing task to execute.
duke@435 667 class AbstractRefProcTaskExecutor::ProcessTask {
duke@435 668 protected:
duke@435 669 ProcessTask(ReferenceProcessor& ref_processor,
duke@435 670 DiscoveredList refs_lists[],
duke@435 671 bool marks_oops_alive)
duke@435 672 : _ref_processor(ref_processor),
duke@435 673 _refs_lists(refs_lists),
duke@435 674 _marks_oops_alive(marks_oops_alive)
duke@435 675 { }
duke@435 676
duke@435 677 public:
duke@435 678 virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
duke@435 679 OopClosure& keep_alive,
duke@435 680 VoidClosure& complete_gc) = 0;
duke@435 681
duke@435 682 // Returns true if a task marks some oops as alive.
duke@435 683 bool marks_oops_alive() const
duke@435 684 { return _marks_oops_alive; }
duke@435 685
duke@435 686 protected:
duke@435 687 ReferenceProcessor& _ref_processor;
duke@435 688 DiscoveredList* _refs_lists;
duke@435 689 const bool _marks_oops_alive;
duke@435 690 };
duke@435 691
duke@435 692 // Abstract reference processing task to execute.
duke@435 693 class AbstractRefProcTaskExecutor::EnqueueTask {
duke@435 694 protected:
duke@435 695 EnqueueTask(ReferenceProcessor& ref_processor,
duke@435 696 DiscoveredList refs_lists[],
coleenp@548 697 HeapWord* pending_list_addr,
duke@435 698 int n_queues)
duke@435 699 : _ref_processor(ref_processor),
duke@435 700 _refs_lists(refs_lists),
duke@435 701 _pending_list_addr(pending_list_addr),
duke@435 702 _n_queues(n_queues)
duke@435 703 { }
duke@435 704
duke@435 705 public:
duke@435 706 virtual void work(unsigned int work_id) = 0;
duke@435 707
duke@435 708 protected:
duke@435 709 ReferenceProcessor& _ref_processor;
duke@435 710 DiscoveredList* _refs_lists;
coleenp@548 711 HeapWord* _pending_list_addr;
duke@435 712 int _n_queues;
duke@435 713 };
stefank@2314 714
stefank@2314 715 #endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
