Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// aside, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".
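//
// As an illustrative sketch (editor's addition, not part of the original
// header), a collector typically constructs one ReferenceProcessor over the
// span it collects, using the convenience constructor declared below; the
// span value and MT degrees here are placeholders:
//
//   MemRegion span = ...;  // the heap interval this collector manages
//   ReferenceProcessor* rp =
//     new ReferenceProcessor(span,
//                            true, (uint)ParallelGCThreads,  // MT processing
//                            true, (uint)ParallelGCThreads,  // MT discovery
//                            true,    // atomic discovery
//                            NULL,    // no is_alive_non_header closure
//                            false);  // no discovered-list barrier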

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
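
// Editor's sketch (not in the original header): head()/set_head() hide the
// compressed-oops encoding of the list head, so callers can prepend a
// discovered Reference without caring about the representation. The helper
// below is hypothetical:
//
//   void prepend(DiscoveredList& list, oop ref) {
//     // ... link ref's discovered field to the current list.head() ...
//     list.set_head(ref);
//     list.inc_length(1);
//   }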

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;

  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

  // Returns true unless we have reached the end of the list.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us whether to allow for the
  // possibility of a NULL referent in the discovered Reference object.
  // This typically happens in the case of concurrent collectors that may
  // have done the discovery concurrently, or interleaved, with mutator
  // execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out the referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
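
// Editor's sketch (not part of the original header) of the canonical way
// this iterator is driven: prune entries whose referents are still strongly
// reachable and keep everything else on the list (a phase-2-style traversal):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // Referent is reachable anyway: unlink the Reference and make
//       // sure the referent is kept alive (traced) by the collector.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }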

class ReferenceProcessor : public CHeapObj<mtGC> {

 private:
  size_t total_count(DiscoveredList lists[]);

 protected:
  // Compatibility with pre-4965777 JDKs
  static bool        _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong       _soft_ref_timestamp_clock;

  MemRegion          _span;                 // (right-open) interval of heap
                                            // subject to weak reference discovery

  bool               _discovering_refs;     // true when discovery enabled
  bool               _discovery_is_atomic;  // if discovery is atomic wrt
                                            // other collectors in configuration
  bool               _discovery_is_mt;      // true if reference discovery is MT.

  // If true, setting the "next" field of a discovered refs list requires
  // write barrier(s). (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool               _discovered_list_needs_barrier;

  BarrierSet*        _bs;                   // Cached copy of BarrierSet.
  bool               _enqueuing_is_done;    // true if all weak references enqueued
  bool               _processing_is_mt;     // true during phases when
                                            // reference processing is MT.
  uint               _next_id;              // round-robin mod _num_q counter in
                                            // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint            _num_q;
  // The maximum MT'ness degree of the queues below
  uint            _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                      { return _num_q; }
  uint max_num_q()                  { return _max_num_q; }
  void set_active_mt_degree(uint v) { _num_q = v; }

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
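
  // Editor's note (not in the original header): a collector typically calls
  // setup_policy() once per collection cycle, before reference processing,
  // passing its "clear all soft refs" decision:
  //
  //   rp->setup_policy(clear_all_soft_refs);  // clear_all_soft_refs: a bool
  //                                           // computed by the collector
  //
  // and then calls process_discovered_references(), which consults the
  // installed policy.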

  // Process references with a certain reachability level.
  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
                                    ReferencePolicy*             policy,
                                    bool                         clear_referent,
                                    BoolObjectClosure*           is_alive,
                                    OopClosure*                  keep_alive,
                                    VoidClosure*                 complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and, transitively,
  // everything reachable from them).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and, transitively, everything
  // reachable from them).
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
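
  // Editor's sketch (not in the original header): for a single list, the
  // three phases compose roughly as below; process_discovered_reflist()
  // drives this, possibly in parallel through the task executor. Phase1
  // applies only when a (soft-reference) policy is supplied:
  //
  //   if (policy != NULL) {
  //     process_phase1(list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(list, is_alive, keep_alive, complete_gc);
  //   process_phase3(list, clear_referent, is_alive, keep_alive, complete_gc);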

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: a Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of JNI handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discovered_refs(NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _max_num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  { }

  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool discovered_list_needs_barrier = false);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };
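
  // Editor's note (assumption, not in the original header): these values
  // back the -XX:RefDiscoveryPolicy command-line option, so the referent-
  // based policy can be selected at launch time, e.g.:
  //
  //   java -XX:RefDiscoveryPolicy=1 ...   // ReferentBasedDiscovery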

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()           { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer);

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in progress that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
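
// Editor's sketch (not in the original header): the class is meant to be
// used RAII-style, so discovery is restored even on early returns:
//
//   {
//     NoRefDiscovery no_discovery(rp);
//     // ... work during which rp must not discover references ...
//   } // discovery re-enabled here, if it was enabled on entry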

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
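
// Editor's sketch (not in the original header): a collector that processes
// references for only part of the heap can temporarily narrow the span,
// assuming a hypothetical sub-region `gen_span`:
//
//   {
//     ReferenceProcessorSpanMutator x(rp, gen_span);
//     // ... discovery/processing now restricted to gen_span ...
//   } // original span restored here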

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};

// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};

// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
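
// Editor's sketch (an assumption, not part of the original sources): a
// trivially serial executor would run each task once on the calling thread.
// The closures handed to ProcessTask::work() come from the collector; here
// they are hypothetical members set up by the enclosing collector code.
//
//   class SerialRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//     BoolObjectClosure* _is_alive;     // hypothetical, supplied by collector
//     OopClosure*        _keep_alive;   // hypothetical
//     VoidClosure*       _complete_gc;  // hypothetical
//    public:
//     virtual void execute(ProcessTask& task) {
//       task.work(0 /* work_id */, *_is_alive, *_keep_alive, *_complete_gc);
//     }
//     virtual void execute(EnqueueTask& task) {
//       task.work(0 /* work_id */);
//     }
//   };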

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP