Thu, 11 Dec 2008 12:05:08 -0800
6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr

/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.  Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex interval of the heap, but, efficiency
// apart, there seems to be no reason it couldn't be extended
// (with appropriate modifications) to any "non-convex interval".
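
// Example (illustrative sketch, not part of the original header): a collector
// that only collects the young generation would typically use a span covering
// exactly that generation's reserved region. The names `young_gen` and
// `ref_processor` below are hypothetical placeholders for collector-specific
// code:
//
//   MemRegion young_span = young_gen->reserved();   // [start, end) of young gen
//   ref_processor->set_span(young_span);            // restrict weak ref discovery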

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;

class ReferenceProcessor : public CHeapObj {
 protected:
  // End of list marker
  static oop  _sentinelRef;
  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to wkref discovery
  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.
  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s).  (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;
  BarrierSet* _bs;                   // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  int         _next_id;              // round-robin counter in
                                     // support of work distribution

  // For collectors that do not keep GC marking information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop (the field is currently initialized to NULL for
  // all collectors but the CMS collector).
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The MT'ness degree of the queues below
  int             _num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  int num_q()                            { return _num_q; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
  static oop  sentinel_ref()             { return _sentinelRef; }
  static oop* adr_sentinel_ref()         { return &_sentinelRef; }
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
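
  // Example (illustrative sketch, not part of the original header): a collector
  // typically refreshes the soft reference policy once per collection, before
  // any reference processing takes place. `clear_all_soft_refs` is a
  // hypothetical flag supplied by the collector (e.g. set when aggressive
  // clearing has been requested):
  //
  //   rp->setup_policy(clear_all_soft_refs);   // snapshot policy for this cycle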

 public:
  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  inline void process_phase2(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive,
                             VoidClosure*       complete_gc) {
    if (discovery_is_atomic()) {
      // complete_gc is ignored in this case for this phase
      pp2_work(refs_list, is_alive, keep_alive);
    } else {
      assert(complete_gc != NULL, "Error");
      pp2_work_concurrent_discovery(refs_list, is_alive,
                                    keep_alive, complete_gc);
    }
  }
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);

  // "Preclean" all the discovered reference lists
  // by removing references with strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the second is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrent
  // with their discovery by a (concurrent) collector.
  // For a definition of "active" see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: A Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);
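
  // Illustrative sketch (not part of the original header) of the "active"
  // encoding described above; the use of the java_lang_ref_Reference accessor
  // from javaClasses.hpp is an assumption made here for illustration only:
  //
  //   bool is_active = (java_lang_ref_Reference::next(ref) == NULL);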

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);
  void verify_ok_to_handle_reflists() PRODUCT_RETURN;

  void abandon_partial_discovered_list(DiscoveredList& refs_list);

  // Calculate the number of jni handles.
  unsigned int count_jni_refs();

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

 public:
  // constructor
  ReferenceProcessor():
    _span((HeapWord*)NULL, (HeapWord*)NULL),
    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
    _discovering_refs(false),
    _discovery_is_atomic(true),
    _enqueuing_is_done(false),
    _discovery_is_mt(false),
    _discovered_list_needs_barrier(false),
    _bs(NULL),
    _is_alive_non_header(NULL),
    _num_q(0),
    _processing_is_mt(false),
    _next_id(0)
  {}

  ReferenceProcessor(MemRegion span, bool atomic_discovery,
                     bool mt_discovery,
                     int mt_degree = 1,
                     bool mt_processing = false,
                     bool discovered_list_needs_barrier = false);

  // Allocates and initializes a reference processor.
  static ReferenceProcessor* create_ref_processor(
    MemRegion          span,
    bool               atomic_discovery,
    bool               mt_discovery,
    BoolObjectClosure* is_alive_non_header = NULL,
    int                parallel_gc_threads = 1,
    bool               mt_processing = false,
    bool               discovered_list_needs_barrier = false);
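
  // Example (illustrative sketch, not part of the original header): a
  // stop-the-world collector creating its reference processor at heap
  // initialization. The span and thread-count values are hypothetical
  // placeholders for collector-specific configuration:
  //
  //   _ref_processor =
  //     ReferenceProcessor::create_ref_processor(heap_span,              // discovery span
  //                                              true,                   // atomic discovery
  //                                              ParallelGCThreads > 1,  // MT discovery
  //                                              NULL,                   // marks kept in header
  //                                              ParallelGCThreads);
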
  // RefDiscoveryPolicy values
  enum {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery()  { _discovering_refs = true;  }
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueuing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots
  static void oops_do(OopClosure* f);     // strong root(s)

  // Discover a Reference object, using appropriate discovery criteria
  bool discover_reference(oop obj, ReferenceType rt);

  // Process references found during GC (called by the garbage collector)
  void process_discovered_references(BoolObjectClosure*           is_alive,
                                     OopClosure*                  keep_alive,
                                     VoidClosure*                 complete_gc,
                                     AbstractRefProcTaskExecutor* task_executor);
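
  // Example (illustrative sketch, not part of the original header) of the
  // typical call sequence at the end of a collection. The closures
  // (`is_alive_cl`, `keep_alive_cl`, `complete_gc_cl`) are hypothetical,
  // collector-supplied closures; a single-threaded collector may pass NULL
  // for the task executor:
  //
  //   rp->process_discovered_references(&is_alive_cl, &keep_alive_cl,
  //                                     &complete_gc_cl, NULL);
  //   ...
  //   rp->enqueue_discovered_references();  // hand survivors to the pending list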

 public:
  // Enqueue references at end of GC (called by the garbage collector)
  bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields.  Must be called only at a safepoint.
  void abandon_partial_discovery();

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  static void verify();

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery();
    }
  }
};
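
// Example (illustrative sketch, not part of the original header): suppress
// reference discovery across an operation that must not add to the discovered
// lists. `rp` is a hypothetical ReferenceProcessor*:
//
//   {
//     NoRefDiscovery no_discovery(rp);   // discovery off (if it was on)
//     // ... do work that must not discover references ...
//   }                                    // destructor restores prior state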

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
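
// Example (illustrative sketch, not part of the original header): temporarily
// widen the discovery span when a collector's reference processor is reused
// for a collection covering a larger region. `rp` and `full_heap_span` are
// hypothetical placeholders:
//
//   {
//     ReferenceProcessorSpanMutator x(rp, full_heap_span);
//     // ... collect using the temporary span ...
//   }   // destructor restores the saved span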

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
                              bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

 public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};

// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};

// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
 public:

  // Abstract tasks to execute.
  class ProcessTask;
  class EnqueueTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task) = 0;
  virtual void execute(EnqueueTask& task) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};
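
// Illustrative sketch (not part of the original header): a collector-specific
// executor typically wraps the task in a gang task and runs it on its GC
// worker threads, each worker invoking task.work() under its own id with
// per-worker closures supplied by the collector. Everything below is a
// hypothetical outline, not code from this changeset:
//
//   void MyRefProcTaskExecutor::execute(ProcessTask& task) {
//     // for each worker i in [0, n_workers):
//     //   task.work(i, *is_alive[i], *keep_alive[i], *complete_gc[i]);
//   }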

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
 protected:
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              bool                marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

 public:
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

 protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  const bool          _marks_oops_alive;
};
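
// Illustrative sketch (not part of the original header): a concrete ProcessTask
// subclass dispatches one reference-processing phase over one discovered list
// per worker. The class name and phase choice below are hypothetical examples;
// the real subclasses live in collector-specific task-proxy code:
//
//   class RefProcPhase2TaskSketch : public AbstractRefProcTaskExecutor::ProcessTask {
//    public:
//     RefProcPhase2TaskSketch(ReferenceProcessor& rp, DiscoveredList lists[])
//       : ProcessTask(rp, lists, /* marks_oops_alive */ false) { }
//     virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
//                       OopClosure& keep_alive, VoidClosure& complete_gc) {
//       _ref_processor.process_phase2(_refs_lists[work_id], &is_alive,
//                                     &keep_alive, &complete_gc);
//     }
//   };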

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
 protected:
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList      refs_lists[],
              HeapWord*           pending_list_addr,
              oop                 sentinel_ref,
              int                 n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _sentinel_ref(sentinel_ref),
      _n_queues(n_queues)
  { }

 public:
  virtual void work(unsigned int work_id) = 0;

 protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;
  HeapWord*           _pending_list_addr;
  oop                 _sentinel_ref;
  int                 _n_queues;
};