/*
 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ReferenceProcessor class encapsulates the per-"collector" processing
// of "weak" references for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// aside, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".

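// Illustrative lifecycle sketch (not part of the original header; the span,
// policy, and closure names below are hypothetical stand-ins for whatever
// the calling collector provides):
//
//   ReferenceProcessor* rp =
//     ReferenceProcessor::create_ref_processor(young_gen_span,
//                                              true,    // atomic discovery
//                                              false);  // no MT discovery
//   rp->enable_discovery();
//   // ... trace the heap; the collector calls rp->discover_reference(obj, rt)
//   //     for each Reference object it encounters in the span ...
//   rp->process_discovered_references(policy, &is_alive, &keep_alive,
//                                     &complete_gc, NULL);
//   rp->enqueue_discovered_references();
//   rp->disable_discovery();
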
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;

class ReferenceProcessor : public CHeapObj {
  friend class DiscoveredList;
  friend class DiscoveredListIterator;
 protected:
  // End of list marker
  static oop          _sentinelRef;
  MemRegion           _span;                 // (right-open) interval of heap
                                             // subject to wkref discovery
  bool                _discovering_refs;     // true when discovery enabled
  bool                _discovery_is_atomic;  // if discovery is atomic wrt
                                             // other collectors in configuration
  bool                _discovery_is_mt;      // true if reference discovery is MT.
  bool                _enqueuing_is_done;    // true if all weak references enqueued
  bool                _processing_is_mt;     // true during phases when
                                             // reference processing is MT.
  int                 _next_id;              // round-robin counter in
                                             // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure*  _is_alive_non_header;

  // The discovered ref lists themselves
  int                 _num_q;                // the MT'ness degree of the queues below
  DiscoveredList*     _discoveredSoftRefs;   // pointer to array of oops
  DiscoveredList*     _discoveredWeakRefs;
  DiscoveredList*     _discoveredFinalRefs;
  DiscoveredList*     _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop* sentinel_ref()             { return &_sentinelRef; }

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their transitive closure).
  void process_phase1(DiscoveredList&    refs_list_addr,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list_addr,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list_addr, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list_addr, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list_addr,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list_addr,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their transitive closure)
  void process_phase3(DiscoveredList&    refs_list_addr,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
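
  // Illustrative ordering of the phases above, as typically driven by
  // process_discovered_reflist (a sketch only; the authoritative sequencing
  // lives in referenceProcessor.cpp). Phase1 is applied only when a
  // ReferencePolicy is supplied (e.g. for soft references):
  //
  //   process_phase1(list, policy, is_alive, keep_alive, complete_gc);
  //   process_phase2(list, is_alive, keep_alive, complete_gc);
  //   process_phase3(list, clear_referent, is_alive, keep_alive, complete_gc);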

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, oop* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The is_alive argument is a predicate on an oop that indicates
  // its (strong) reachability, and the yield argument is a closure
  // that may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or the predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield);
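
  // Illustrative call (a sketch; CMS is the only current client and supplies
  // its own closures -- the names below are hypothetical):
  //
  //   rp->preclean_discovered_references(&cms_is_alive, &cms_keep_alive,
  //                                      &cms_complete_gc, &cms_yield);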

  // Delete entries in the discovered lists that either
  // have a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);

  // Returns the name of the discovered reference list
  // occupying slot i; the name is determined by i / _num_q
  // (there are _num_q lists per reference type).
  const char* list_name(int i);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  void enqueue_discovered_reflists(oop* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        oop* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);
  void abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]);

  // Calculate the number of JNI handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _is_alive_non_header(NULL),
    _num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  {}

  ReferenceProcessor(MemRegion span, bool atomic_discovery,
                     bool mt_discovery, int mt_degree = 1,
                     bool mt_processing = false);

  // Allocates and initializes a reference processor.
  static ReferenceProcessor* create_ref_processor(
    MemRegion          span,
    bool               atomic_discovery,
    bool               mt_discovery,
    BoolObjectClosure* is_alive_non_header = NULL,
    int                parallel_gc_threads = 1,
    bool               mt_processing = false);

  // RefDiscoveryPolicy values
  enum {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()              { return _span; }
  void set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()  { _discovering_refs = true;  }
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done()           { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots
  static void oops_do(OopClosure* f);     // strong root(s)

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(ReferencePolicy*             policy,
                                     BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  static void verify();

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in the scope
// that contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    if ((_was_discovering_refs = _rp->discovery_enabled())) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery();
    }
  }
};
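
// Example use (illustrative only; "rp" stands for whichever
// ReferenceProcessor the collector owns):
//
//   {
//     NoRefDiscovery no_discovery(rp);
//     // ... work during which reference discovery must stay off ...
//   } // prior discovery state is restored when the scope exits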


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
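
// Example use of the scoped-mutator pattern above (illustrative only;
// the span name is hypothetical). The same save-in-constructor /
// restore-in-destructor idiom applies to the MT-discovery, is_alive,
// atomic-discovery and MT-processing mutators that follow:
//
//   {
//     ReferenceProcessorSpanMutator span_mutator(rp, full_heap_span);
//     // ... collect with the temporarily changed span ...
//   } // original span is restored here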

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
                              bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};


// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure* cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};


// This class is an interface used to implement task execution
// for reference processing.
class AbstractRefProcTaskExecutor {
 public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { }
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
 protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

 public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

 protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};

// Abstract reference enqueuing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
 protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              oop*                pending_list_addr,
              oop                 sentinel_ref,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _sentinel_ref(sentinel_ref),
      _n_queues(n_queues)
  { }

 public:
  virtual void work(unsigned int work_id) = 0;

 protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  oop*                _pending_list_addr;
  oop                 _sentinel_ref;
  int                 _n_queues;
};
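
// Illustrative sketch of how a collector plugs into the executor interface
// above (hypothetical names; each real collector provides its own executor
// and task subclasses):
//
//   class MyRefProcTask : public AbstractRefProcTaskExecutor::ProcessTask {
//    public:
//     MyRefProcTask(ReferenceProcessor& rp, DiscoveredList lists[])
//       : ProcessTask(rp, lists, true /* marks_oops_alive */) { }
//     virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       // process the discovered list(s) assigned to worker "work_id"
//     }
//   };
//
//   class MyRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
//    public:
//     virtual void execute(ProcessTask& task) {
//       // hand "task" to the collector's worker threads; each worker i
//       // calls task.work(i, ...) with its own closures
//     }
//     virtual void execute(EnqueueTask& task) {
//       // each worker i calls task.work(i)
//     }
//   };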