Thu, 26 Jun 2014 11:36:58 +0200
8047818: G1 HeapRegions can no longer be ContiguousSpaces
Summary: Change parent of G1OffsetTableContigSpace to CompactibleSpace, reimplement missing functionality
Reviewed-by: stefank, jmasa, tschatzl
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".
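//
// Example (illustrative sketch only, not part of this interface): a
// collector using single-threaded, atomic discovery over its whole heap
// could set up a vanilla processor as follows, where "heap_span" is a
// hypothetical MemRegion covering the collector's heap:
//
//   ReferenceProcessor* rp = new ReferenceProcessor(heap_span);
//   rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
//   // ... during the collection, discover_reference() is called on each
//   // Reference object encountered in the span ...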

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
                                _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool is_empty() const { return head() == NULL; }
  size_t length() { return _len; }
  void set_length(size_t len) { _len = len; }
  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
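
// Example (illustrative): the head is stored compressed under
// -XX:+UseCompressedOops, so adr_head() returns the address of one of the
// two slots and callers must dispatch on UseCompressedOops to use it:
//
//   DiscoveredList list;
//   list.set_head(ref);               // encodes "ref" if compressed oops are on
//   oop h = list.head();              // decodes transparently
//   HeapWord* slot = list.adr_head(); // &_compressed_head or &_oop_head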

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
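
// Typical traversal (a sketch of the pattern used by the processing phases
// in referenceProcessor.cpp; the closures are supplied by the caller):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is reachable anyway: unlink the Reference from the
//       // discovered list and keep its referent (and cohort) alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }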

class ReferenceProcessor : public CHeapObj<mtGC> {

 private:
  size_t total_count(DiscoveredList lists[]);

 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  MemRegion   _span;                    // (right-open) interval of heap
                                        // subject to wkref discovery

  bool        _discovering_refs;        // true when discovery enabled
  bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                        // other collectors in configuration
  bool        _discovery_is_mt;         // true if reference discovery is MT.

  bool        _enqueuing_is_done;       // true if all weak references enqueued
  bool        _processing_is_mt;        // true during phases when
                                        // reference processing is MT.
  uint        _next_id;                 // round-robin mod _num_q counter in
                                        // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy*   _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy*   _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*          _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint            _num_q;
  // The maximum MT'ness degree of the queues below
  uint            _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                             { return _num_q; }
  uint max_num_q()                         { return _max_num_q; }
  void set_active_mt_degree(uint v)        { _num_q = v; }

  DiscoveredList* discovered_refs()        { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
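
  // Example (illustrative): setup_policy() is intended to be called once per
  // collection, before any reference processing, because setup() snapshots
  // the soft-ref policy threshold for the duration of the cycle:
  //
  //   ReferencePolicy* policy = rp->setup_policy(clear_all_soft_refs);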

  // Process references with a certain reachability level.
  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
                                    ReferencePolicy*             policy,
                                    bool                         clear_referent,
                                    BoolObjectClosure*           is_alive,
                                    OopClosure*                  keep_alive,
                                    VoidClosure*                 complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
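
  // The phases above are applied in order to each discovered list (a sketch
  // of the control flow inside process_discovered_reflist; phase 1 only
  // runs when a SoftReference policy is supplied):
  //
  //   if (policy != NULL) {
  //     process_phase1(list, policy, is_alive, keep_alive, complete_gc);
  //   }
  //   process_phase2(list, is_alive, keep_alive, complete_gc);
  //   process_phase3(list, clear_referent, is_alive, keep_alive, complete_gc);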

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer,
                                      GCId               gc_id);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
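
  // For example, with _num_q == 3 successive calls to next_id() yield
  // 0, 1, 2, 0, 1, ... so that discovered references are spread
  // round-robin across the active discovered lists.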
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery = 1,
    DiscoveryPolicyMin = ReferenceBasedDiscovery,
    DiscoveryPolicyMax = ReferentBasedDiscovery
  };
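
  // The active policy is selected at VM start-up by the RefDiscoveryPolicy
  // flag, e.g. -XX:RefDiscoveryPolicy=1 for ReferentBasedDiscovery
  // (0, ReferenceBasedDiscovery, is the default).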

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer,
                                GCId                         gc_id);
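
  // Example (illustrative sketch of a caller at the end of marking; the
  // closures, timer and id are the collector's own):
  //
  //   rp->setup_policy(clear_all_soft_refs);
  //   ReferenceProcessorStats stats =
  //     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
  //                                       NULL /* serial processing */,
  //                                       gc_timer, gc_id);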

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj) PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
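
// Example (illustrative): discovery is suppressed for exactly the lifetime
// of the stack object, and restored only if it was enabled on entry:
//
//   {
//     NoRefDiscovery no_discovery(rp);
//     // ... work that must not discover references ...
//   }  // discovery re-enabled here, if it was enabled before
//
// The mutator classes below apply the same save/override/restore idiom to
// the span, MT-discovery, is_alive closure, atomicity and MT-processing
// settings of a ReferenceProcessor.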

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};

// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};

// This class is an interface used to implement task execution for the
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
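
// Example (hypothetical sketch of a trivial serial executor; real
// collectors instead fan the task out to their worker threads, passing
// each worker its own work_id and closures):
//
//   class SerialRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//   public:
//     virtual void execute(ProcessTask& task) {
//       task.work(0 /* work_id */, is_alive, keep_alive, complete_gc);
//     }
//     virtual void execute(EnqueueTask& task) {
//       task.work(0 /* work_id */);
//     }
//   };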

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP