src/share/vm/memory/referenceProcessor.hpp

author:      johnc
date:        Thu, 22 Sep 2011 10:57:37 -0700
changeset:   3175:4dfb2df418f2
parent:      3117:eca1193ca245
child:       3188:d1bdeef3e3e2
permissions: -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "oops/instanceRefKlass.hpp"

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

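// A minimal sketch of the cycle a collector drives (illustrative only;
// the closures shown are assumptions and the exact sequence varies by
// collector):
//
//   rp->enable_discovery(true /* verify_disabled */, true /* check_no_refs */);
//   // ... trace the heap, handing Reference objects found in the span
//   // ... to rp->discover_reference(obj, ref_type) ...
//   rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
//                                     NULL /* serial processing */);
//   // (processing turns discovery back off)
//   rp->enqueue_discovered_references();
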
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

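// A sketch of how the dual representation above is used (illustrative):
//
//   DiscoveredList list;
//   list.set_head(ref);       // stores a narrowOop when UseCompressedOops
//   oop h = list.head();      // decodes back to a full-width oop
//
// adr_head() deliberately returns HeapWord* so that callers can cast it
// to narrowOop* or oop*, as dictated by UseCompressedOops.
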
// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};

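// The canonical traversal idiom for the iterator above - an illustrative
// sketch of what the work methods in referenceProcessor.cpp do:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is reachable after all: unlink the Reference
//       // and keep its referent (and anything it points to) alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }
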
class ReferenceProcessor : public CHeapObj {
 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s). (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;

  BarrierSet* _bs;                   // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  int         _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  int num_q()                            { return _num_q; }
  int max_num_q()                        { return _max_num_q; }
  void set_active_mt_degree(int v)       { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }

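  // A collector typically snapshots the policy once per collection,
  // before any reference processing; e.g. (illustrative, and the flag
  // name is hypothetical):
  //
  //   rp->setup_policy(clear_all_soft_refs);
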
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

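  // Together, the phases above form the per-list pipeline applied by
  // process_discovered_reflist(). Roughly (an illustrative sketch that
  // elides the MT/task-executor paths; phase1 runs only for lists
  // governed by a SoftReference clearing policy):
  //
  //   process_phase1(refs_list, policy, is_alive, keep_alive, complete_gc);
  //   process_phase2(refs_list, is_alive, keep_alive, complete_gc);
  //   process_phase3(refs_list, clear_referent, is_alive, keep_alive,
  //                  complete_gc);
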
  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector. should_unload_classes is
  // used to aid assertion checking when classes are collected.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      bool               should_unload_classes);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, int mt_processing_degree = 1,
                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);

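  // Example construction (an illustrative sketch; the span, degrees and
  // closure are assumptions, not prescribed values):
  //
  //   ReferenceProcessor* rp =
  //     new ReferenceProcessor(heap_span,
  //                            true, (int) ParallelGCThreads, // MT processing
  //                            true, (int) ParallelGCThreads, // MT discovery
  //                            false,          // non-atomic discovery
  //                            &my_is_alive,   // liveness closure
  //                            true);          // lists need a barrier
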
  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
    // Verify that we're not currently discovering refs
    assert(!verify_disabled || !_discovering_refs, "nested call?");

    if (check_no_refs) {
      // Verify that the discovered lists are empty
      verify_no_references_recorded();
    }
#endif // ASSERT
    _discovering_refs = true;
  }

  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads same-old-timeously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
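
// Typical use (illustrative):
//
//   {
//     NoRefDiscovery no_discovery(ref_processor());
//     // ... work that must not discover Reference objects ...
//   } // discovery is re-enabled here iff it was enabled on entry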


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
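
// Example (illustrative; the generation object is hypothetical):
//
//   {
//     ReferenceProcessorSpanMutator rp_mut(ref_processor(), gen->reserved());
//     // ... collect gen; discovery is confined to gen's span ...
//   } // the original span is restored on scope exit
//
// The remaining ReferenceProcessor*Mutator classes below follow the same
// save/set/restore (RAII) pattern for other fields of the processor.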

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
 public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};

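// A minimal sketch of a concrete executor (illustrative; "MyWorkGang" and
// its run_task() are assumptions, not part of this interface):
//
//   class MyRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//     MyWorkGang* _workers;
//    public:
//     virtual void execute(ProcessTask& task) {
//       // Wrap 'task' in a gang task whose worker i calls
//       // task.work(i, is_alive, keep_alive, complete_gc).
//       _workers->run_task(&wrapped_process_task);
//     }
//     virtual void execute(EnqueueTask& task) {
//       // Worker i calls task.work(i).
//       _workers->run_task(&wrapped_enqueue_task);
//     }
//   };
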
// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
 protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

 public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

 protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
 protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

 public:
  virtual void work(unsigned int work_id) = 0;

 protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
