/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
#define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP

#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
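
// Illustrative sketch (not part of this interface): how a newly discovered
// Reference "obj" is typically pushed onto the front of a DiscoveredList.
// head()/set_head() hide whether the head is stored compressed, and a
// self-loop in the Reference's discovered field marks the end of the list.
//
//   oop current_head = refs_list.head();
//   // The last ref must have its discovered field pointing to itself.
//   oop next_discovered = (current_head != NULL) ? current_head : obj;
//   // ... store next_discovered into obj's discovered field ...
//   refs_list.set_head(obj);
//   refs_list.inc_length(1);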

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // True while the iterator has not yet reached the end of the list.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
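
// Illustrative sketch (modeled on the phase-2 work methods declared below;
// not a definitive implementation): the canonical way a processing phase
// walks a DiscoveredList with this iterator, dropping references whose
// referents turn out to be reachable.
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // The referent is reachable after all: unlink the Reference
//       // from the discovered list and keep the referent alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }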

class ReferenceProcessor : public CHeapObj<mtGC> {

 private:
  size_t total_count(DiscoveredList lists[]);

 protected:
  // Compatibility with pre-4965777 JDKs
  static bool _pending_list_uses_discovered_field;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  uint        _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint _num_q;
  // The maximum MT'ness degree of the queues below
  uint _max_num_q;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_q()                      { return _num_q; }
  uint max_num_q()                  { return _max_num_q; }
  void set_active_mt_degree(uint v) { _num_q = v; }

  DiscoveredList* discovered_refs() { return _discovered_refs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
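
  // Illustrative usage sketch (hypothetical caller, not part of this file):
  // a collector snapshots the soft-ref clearing policy once per cycle with
  // setup_policy(), which installs _current_soft_ref_policy for the
  // processing methods below to consult:
  //
  //   rp->setup_policy(clear_all_soft_refs);
  //   ReferenceProcessorStats stats =
  //     rp->process_discovered_references(&is_alive, &keep_alive,
  //                                       &complete_gc,
  //                                       NULL /* serial processing */,
  //                                       gc_timer);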

  // Process references with a certain reachability level.
  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
                                    ReferencePolicy*             policy,
                                    bool                         clear_referent,
                                    BoolObjectClosure*           is_alive,
                                    OopClosure*                  keep_alive,
                                    VoidClosure*                 complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active", see java.lang.ref.Reference:
  // refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: a ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(uint i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void clear_discovered_references(DiscoveredList& refs_list);
  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(MemRegion span,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs);
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f); // weak roots

  // Balance each of the discovered lists.
  void balance_all_queues();
  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*           is_alive,
                                OopClosure*                  keep_alive,
                                VoidClosure*                 complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                GCTimer*                     gc_timer);

  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj)        PRODUCT_RETURN;

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope that contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
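
// Illustrative usage sketch (hypothetical scope, not part of this file):
//
//   {
//     NoRefDiscovery no_discovery(ref_processor);
//     // ... work during which no new references may be discovered ...
//   } // discovery re-enabled here, iff it was enabled on entry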


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};
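
// Illustrative usage sketch (hypothetical, not part of this file): the
// mutator classes above all follow the same stack-scoped RAII pattern,
// e.g. temporarily forcing single-threaded reference processing:
//
//   {
//     ReferenceProcessorMTProcMutator mt_mut(rp, false /* mt */);
//     // ... reference processing in this scope is single-threaded ...
//   } // previous MT-processing setting restored here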


// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { }
};
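
// Illustrative sketch (hypothetical subclass, not part of this file): a
// collector supplies an executor that dispatches these tasks to its worker
// threads; a minimal serial variant could simply run each task inline as
// worker 0, with the closures supplied by the caller:
//
//   class SerialRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
//     virtual void execute(ProcessTask& task) {
//       // task.work(0, is_alive, keep_alive, complete_gc);
//     }
//     virtual void execute(EnqueueTask& task) { task.work(0); }
//   };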

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  int                 _n_queues;
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP