src/share/vm/memory/referenceProcessor.hpp

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "oops/instanceRefKlass.hpp"

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

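// A hedged sketch of the typical per-collection lifecycle (the closure and
// flag names below are placeholders; each collector wires in its own span,
// closures and task executor):
//
//   rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
//   ... during tracing, for each candidate Reference object encountered:
//   rp->discover_reference(obj, ref_type);
//   ... once tracing is complete:
//   rp->setup_policy(clear_all_soft_refs);
//   rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc, NULL);
//   rp->enqueue_discovered_references(NULL);
//   rp->disable_discovery();
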
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
                                _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;

  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
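
// A hedged sketch of how the methods above are typically combined when
// walking a discovered list (compare the phase work methods in
// referenceProcessor.cpp); the surrounding policy decisions are elided:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is reachable after all: drop the Reference from
//       // the list and keep its referent alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }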

class ReferenceProcessor : public CHeapObj<mtGC> {
 protected:
  // Compatibility with pre-4965777 JDKs
  static bool        _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong       _soft_ref_timestamp_clock;

  MemRegion   _span;                    // (right-open) interval of heap
                                        // subject to wkref discovery

  bool        _discovering_refs;        // true when discovery enabled
  bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                        // other collectors in configuration
  bool        _discovery_is_mt;         // true if reference discovery is MT.

  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;

  BarrierSet* _bs;                      // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;       // true if all weak references enqueued
  bool        _processing_is_mt;        // true during phases when
                                        // reference processing is MT.
  uint        _next_id;                 // round-robin mod _num_q counter in
                                        // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint             _num_q;
  // The maximum MT'ness degree of the queues below
  uint             _max_num_q;

  // Master array of discovered oops
  DiscoveredList*  _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList*  _discoveredSoftRefs;
  DiscoveredList*  _discoveredWeakRefs;
  DiscoveredList*  _discoveredFinalRefs;
  DiscoveredList*  _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                             { return _num_q; }
  uint max_num_q()                         { return _max_num_q; }
  void set_active_mt_degree(uint v)        { _num_q = v; }

  DiscoveredList* discovered_refs()        { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&     refs_list,
                      ReferencePolicy*    policy,
                      BoolObjectClosure*  is_alive,
                      OopClosure*         keep_alive,
                      VoidClosure*        complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discovered_refs(NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);
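
  // As an illustration only (the argument values are placeholders, not taken
  // from any particular collector), a collector typically constructs its
  // processor once, during initialization, along these lines:
  //
  //   _ref_processor =
  //     new ReferenceProcessor(heap_span,      // span in which to discover references
  //                            mt_degree > 1,  // mt processing
  //                            mt_degree,      // degree of mt processing
  //                            mt_degree > 1,  // mt discovery
  //                            mt_degree,      // degree of mt discovery
  //                            true,           // atomic discovery (false for concurrent discoverers)
  //                            NULL,           // is_alive_non_header (non-NULL for CMS/G1-style collectors)
  //                            false);         // discovered lists need no write barrier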

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };
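  // Informally (the precise checks live in ReferenceProcessor::discover_reference):
  // under ReferenceBasedDiscovery a Reference is discovered only if the
  // Reference object itself lies in this processor's span, while under
  // ReferentBasedDiscovery the Reference object may lie anywhere provided
  // its referent lies in the span. The policy in force is selected by the
  // RefDiscoveryPolicy command-line flag.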

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields.  Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
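
// Illustrative use only: a collector that must not discover new references
// while it performs some intermediate work might write
//
//   {
//     NoRefDiscovery no_discovery(ref_processor());
//     ... work that should not trigger reference discovery ...
//   }  // discovery is re-enabled here if it was enabled on entry
//
// The ReferenceProcessor*Mutator utility classes below follow the same
// scoped save-and-restore pattern for other pieces of processor state.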


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution
// for reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};
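
// A hedged sketch (class name and body are hypothetical) of how an executor's
// worker code typically specializes ProcessTask: each worker is handed a
// distinct index into _refs_lists along with its own closures.
//
//   class RefProcPhase2TaskSketch : public AbstractRefProcTaskExecutor::ProcessTask {
//    public:
//     RefProcPhase2TaskSketch(ReferenceProcessor& rp, DiscoveredList lists[])
//       : ProcessTask(rp, lists, false /* marks_oops_alive */) { }
//     virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       _ref_processor.process_phase2(_refs_lists[work_id],
//                                     &is_alive, &keep_alive, &complete_gc);
//     }
//   };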

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
