Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP

#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/padded.hpp"
#include "utilities/taskqueue.hpp"

class ChunkArray;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.

typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
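// Note: Padded<> (see memory/padded.hpp) pads each queue out to a cache-line
// boundary, presumably so that the per-thread scan queues do not share cache
// lines and work stealing does not cause false sharing on queue metadata.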

class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 private:
  ParScanWeakRefClosure* _par_cl;
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The state needed by a thread performing parallel young-gen collection.
class ParScanThreadState {
  friend class ParScanThreadStateSet;
 private:
  ObjToScanQueue* _work_queue;
  Stack<oop, mtGC>* const _overflow_stack;

  ParGCAllocBuffer _to_space_alloc_buffer;

  ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
  ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
  // One of these two will be passed to process_roots, which will
  // set its generation.  The first is for two-gen configs where the
  // old gen collects the perm gen; the second is for arbitrary configs.
  // The second isn't used right now (it used to be used for the train, an
  // incremental collector) but the declaration has been left as a reminder.
  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure          _evacuate_followers;
  DefNewGeneration::IsAliveClosure     _is_alive_closure;
  ParScanWeakRefClosure                _scan_weak_ref_closure;
  ParKeepAliveClosure                  _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  ParNewGeneration* _young_gen;
  ParNewGeneration* young_gen() const { return _young_gen; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  HeapWord* _young_old_boundary;

  int _hash_seed;
  int _thread_num;
  ageTable _ageTable;

  bool _to_space_full;

#if TASKQUEUE_STATS
  size_t _term_attempts;
  size_t _overflow_refills;
  size_t _overflow_refill_objs;
#endif // TASKQUEUE_STATS

  // Stats for promotion failure
  PromotionFailedInfo _promotion_failed_info;

  // Timing numbers.
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues. Scans a subset of an array and makes the
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_,
                     Stack<oop, mtGC>* overflow_stacks_,
                     size_t desired_plab_sz_,
                     ParallelTaskTerminator& term_);

 public:
  ageTable* age_table() { return &_ageTable; }

  ObjToScanQueue* work_queue() { return _work_queue; }

  ParGCAllocBuffer* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);

  // Private overflow stack usage
  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
  bool take_from_overflow_stack();
  void push_on_overflow_stack(oop p);

  // Is new_obj a candidate for the scan_partial_array_and_push_remainder() method?
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  int* hash_seed()  { return &_hash_seed; }
  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "word_sz", or else return NULL.
  HeapWord* alloc_in_to_space_slow(size_t word_sz);
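
  // Fast-path allocation: try the thread-local to-space PLAB first and fall
  // back to alloc_in_to_space_slow() only when the buffer cannot satisfy the
  // request (the slow path may, for example, refill the PLAB; see the .cpp
  // file for the details).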
  HeapWord* alloc_in_to_space(size_t word_sz) {
    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
    if (obj != NULL) return obj;
    else return alloc_in_to_space_slow(word_sz);
  }

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord* boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Promotion failure stats
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfo& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
  void print_promotion_failure_size();

#if TASKQUEUE_STATS
  TaskQueueStats& taskqueue_stats() const { return _work_queue->stats; }

  size_t term_attempts() const        { return _term_attempts; }
  size_t overflow_refills() const     { return _overflow_refills; }
  size_t overflow_refill_objs() const { return _overflow_refill_objs; }

  void note_term_attempt() { ++_term_attempts; }
  void note_overflow_refill(size_t objs) {
    ++_overflow_refills; _overflow_refill_objs += objs;
  }

  void reset_stats();
#endif // TASKQUEUE_STATS

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }
  void start_term_time() {
    TASKQUEUE_STATS_ONLY(note_term_attempt());
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }
};

class ParNewGenTask: public AbstractGangTask {
 private:
  ParNewGeneration*            _gen;
  Generation*                  _next_gen;
  HeapWord*                    _young_old_boundary;
  class ParScanThreadStateSet* _state_set;

 public:
  ParNewGenTask(ParNewGeneration* gen,
                Generation* next_gen,
                HeapWord* young_old_boundary,
                ParScanThreadStateSet* state_set);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void work(uint worker_id);

  // Reset the terminator in ParScanThreadStateSet for
  // "active_workers" threads.
  virtual void set_for_termination(int active_workers);
};

class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class EvacuateFollowersClosureGeneral: public VoidClosure {
 private:
  GenCollectedHeap* _gch;
  int               _level;
  OopsInGenClosure* _scan_cur_or_nonheap;
  OopsInGenClosure* _scan_older;
 public:
  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older);
  virtual void do_void();
};

// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  ParNewGeneration&      _generation;
  ParScanThreadStateSet& _state_set;
 public:
  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
                            ParScanThreadStateSet& state_set)
    : _generation(generation), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
  // Switch to single threaded mode.
  virtual void set_single_threaded_mode();
};

// A Generation that does parallel young-gen collection.

class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;
  friend class ParEvacuateFollowersClosure;

 private:
  // The per-worker-thread work queues
  ObjToScanQueueSet* _task_queues;

  // Per-worker-thread local overflow stacks
  Stack<oop, mtGC>* _overflow_stacks;

  // Desired size of survivor space PLABs.
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass-pointers (klass information already copied to the forwarded
  // image.)  Manipulated with CAS.
  oop _overflow_list;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // If true, the older generation does not support promotion undo, so avoid it.
  static bool _avoid_promotion_undo;

  // This closure is used by the reference processor to filter out
  // references to live referents.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  void handle_promotion_failed(GenCollectedHeap* gch,
                               ParScanThreadStateSet& thread_state_set,
                               ParNewTracer& gc_tracer);

 protected:

  bool _survivor_overflow;

  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }

  bool survivor_overflow() { return _survivor_overflow; }
  void set_survivor_overflow(bool v) { _survivor_overflow = v; }

 public:
  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);

  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      delete _task_queues->queue(i);
    }
    delete _task_queues;
  }

  virtual void ref_processor_init();
  virtual Generation::Name kind() { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew"; }

  // override
  virtual bool refs_discovery_is_mt() const {
    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"s mark word by a parallel thread).
  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                    oop obj, size_t obj_sz, markOop m) {
    if (_avoid_promotion_undo) {
      return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
                                                            obj, obj_sz, m);
    }

    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
  }

  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
                                                     oop obj, size_t obj_sz, markOop m);

  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
                                       oop obj, size_t obj_sz, markOop m);
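
  // Note (informal summary; the .cpp file has the authoritative logic): the
  // "_with_undo" variant may allocate space for the copy speculatively and
  // undo that allocation if another thread wins the race to forward "obj";
  // the "_avoiding_promotion_undo" variant is used when the older generation
  // cannot take back such an allocation (see _avoid_promotion_undo above).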

  // In support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool should_simulate_overflow();)

  // Accessor for overflow list
  oop overflow_list() { return _overflow_list; }

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
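  // Illustrative sketch only (assumed shape, not the actual implementation):
  // a lock-free push links "from_space_obj" into the list through its klass
  // slot and publishes the new head with a compare-and-swap, roughly:
  //   do {
  //     observed = _overflow_list;
  //     <store observed into from_space_obj's klass-pointer slot>;
  //   } while (CAS(&_overflow_list, observed, from_space_obj) fails);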

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which need not be empty).  No more than 1/4 of the
  // available space on "work_q" is used.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz() {
    return _plab_stats.desired_plab_sz();
  }

  static oop real_forwardee(oop obj);

  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP