src/share/vm/gc_implementation/parNew/parNewGeneration.hpp

author      stefank
date        Mon, 07 Jul 2014 10:12:40 +0200
changeset   6992:2c6ef90f030a
parent      5515:9766f73e770d
child       7031:ee019285a52c
permissions -rw-r--r--

8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com

duke@435 1 /*
stefank@5515 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
stefank@2314 27
sla@5237 28 #include "gc_implementation/shared/gcTrace.hpp"
johnc@3982 29 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
sla@5237 30 #include "gc_implementation/shared/copyFailedInfo.hpp"
stefank@2314 31 #include "memory/defNewGeneration.hpp"
stefank@5515 32 #include "memory/padded.hpp"
stefank@2314 33 #include "utilities/taskqueue.hpp"
stefank@2314 34
duke@435 35 class ChunkArray;
duke@435 36 class ParScanWithoutBarrierClosure;
duke@435 37 class ParScanWithBarrierClosure;
duke@435 38 class ParRootScanWithoutBarrierClosure;
duke@435 39 class ParRootScanWithBarrierTwoGensClosure;
duke@435 40 class ParEvacuateFollowersClosure;
duke@435 41
duke@435 42 // It would be better if these types could be kept local to the .cpp file,
duke@435 43 // but they must be here to allow ParScanClosure::do_oop_work to be defined
duke@435 44 // in genOopClosures.inline.hpp.
duke@435 45
jcoomes@2020 46 typedef Padded<OopTaskQueue> ObjToScanQueue;
zgu@3900 47 typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
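// A sketch (not part of this file's interface) of how these queues are used:
// each GC worker owns one ObjToScanQueue and, when its own queue is empty,
// steals from peers through the ObjToScanQueueSet. Assuming the
// GenericTaskQueue pop_local()/steal() API from taskqueue.hpp:
//
//   oop task;
//   while (q->pop_local(task) || q_set->steal(thread_num, &seed, task)) {
//     // process task, possibly pushing newly discovered oops on q
//   }
//
// Padded<> rounds each queue up to a cache-line boundary so queues owned by
// different worker threads do not false-share.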
duke@435 48
duke@435 49 class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
coleenp@548 50 private:
duke@435 51 ParScanWeakRefClosure* _par_cl;
coleenp@548 52 protected:
coleenp@548 53 template <class T> void do_oop_work(T* p);
duke@435 54 public:
duke@435 55 ParKeepAliveClosure(ParScanWeakRefClosure* cl);
coleenp@548 56 virtual void do_oop(oop* p);
coleenp@548 57 virtual void do_oop(narrowOop* p);
duke@435 58 };
duke@435 59
duke@435 60 // The state needed by thread performing parallel young-gen collection.
duke@435 61 class ParScanThreadState {
duke@435 62 friend class ParScanThreadStateSet;
coleenp@548 63 private:
duke@435 64 ObjToScanQueue *_work_queue;
zgu@3900 65 Stack<oop, mtGC>* const _overflow_stack;
duke@435 66
duke@435 67 ParGCAllocBuffer _to_space_alloc_buffer;
duke@435 68
duke@435 69 ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
duke@435 70 ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
duke@435 71 ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
stefank@6992 72 // One of these two will be passed to process_roots, which will
duke@435 73 // set its generation. The first is for two-gen configs where the
duke@435 74 // old gen collects the perm gen; the second is for arbitrary configs.
duke@435 75 // The second isn't used right now (it used to be used for the train, an
duke@435 76 // incremental collector) but the declaration has been left as a reminder.
duke@435 77 ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
duke@435 78 // This closure will always be bound to the old gen; it will be used
duke@435 79 // in evacuate_followers.
duke@435 80 ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
duke@435 81 ParEvacuateFollowersClosure _evacuate_followers;
duke@435 82 DefNewGeneration::IsAliveClosure _is_alive_closure;
duke@435 83 ParScanWeakRefClosure _scan_weak_ref_closure;
duke@435 84 ParKeepAliveClosure _keep_alive_closure;
duke@435 85
duke@435 86
duke@435 87 Space* _to_space;
duke@435 88 Space* to_space() { return _to_space; }
duke@435 89
ysr@1114 90 ParNewGeneration* _young_gen;
ysr@1114 91 ParNewGeneration* young_gen() const { return _young_gen; }
ysr@1114 92
duke@435 93 Generation* _old_gen;
duke@435 94 Generation* old_gen() { return _old_gen; }
duke@435 95
duke@435 96 HeapWord *_young_old_boundary;
duke@435 97
duke@435 98 int _hash_seed;
duke@435 99 int _thread_num;
duke@435 100 ageTable _ageTable;
duke@435 101
duke@435 102 bool _to_space_full;
duke@435 103
jcoomes@2065 104 #if TASKQUEUE_STATS
jcoomes@2065 105 size_t _term_attempts;
jcoomes@2065 106 size_t _overflow_refills;
jcoomes@2065 107 size_t _overflow_refill_objs;
jcoomes@2065 108 #endif // TASKQUEUE_STATS
duke@435 109
ysr@1580 110 // Stats for promotion failure
sla@5237 111 PromotionFailedInfo _promotion_failed_info;
ysr@1580 112
duke@435 113 // Timing numbers.
duke@435 114 double _start;
duke@435 115 double _start_strong_roots;
duke@435 116 double _strong_roots_time;
duke@435 117 double _start_term;
duke@435 118 double _term_time;
duke@435 119
duke@435 120 // Helper for trim_queues. Scans subset of an array and makes
duke@435 121 // remainder available for work stealing.
duke@435 122 void scan_partial_array_and_push_remainder(oop obj);
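// A hedged sketch of the idea: rather than one thread scanning a large
// object array alone, the thread scans a leading chunk and pushes the
// from-space image back on its queue to represent the unscanned remainder
// (progress is recorded in the from-space copy), so idle threads can steal
// the task and continue the scan in parallel.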
duke@435 123
duke@435 124 // In support of CMS' parallel rescan of survivor space.
duke@435 125 ChunkArray* _survivor_chunk_array;
duke@435 126 ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
duke@435 127
duke@435 128 void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
duke@435 129
duke@435 130 ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
duke@435 131 Generation* old_gen_, int thread_num_,
ysr@1130 132 ObjToScanQueueSet* work_queue_set_,
zgu@3900 133 Stack<oop, mtGC>* overflow_stacks_,
ysr@1130 134 size_t desired_plab_sz_,
duke@435 135 ParallelTaskTerminator& term_);
duke@435 136
coleenp@548 137 public:
duke@435 138 ageTable* age_table() { return &_ageTable; }
duke@435 139
duke@435 140 ObjToScanQueue* work_queue() { return _work_queue; }
duke@435 141
duke@435 142 ParGCAllocBuffer* to_space_alloc_buffer() {
duke@435 143 return &_to_space_alloc_buffer;
duke@435 144 }
duke@435 145
duke@435 146 ParEvacuateFollowersClosure& evacuate_followers_closure() { return _evacuate_followers; }
duke@435 147 DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
duke@435 148 ParScanWeakRefClosure& scan_weak_ref_closure() { return _scan_weak_ref_closure; }
duke@435 149 ParKeepAliveClosure& keep_alive_closure() { return _keep_alive_closure; }
duke@435 150 ParScanClosure& older_gen_closure() { return _older_gen_closure; }
duke@435 151 ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }
duke@435 152
duke@435 153 // Decrease queue size below "max_size".
duke@435 154 void trim_queues(int max_size);
duke@435 155
ysr@1114 156 // Private overflow stack usage
zgu@3900 157 Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
ysr@1114 158 bool take_from_overflow_stack();
ysr@1114 159 void push_on_overflow_stack(oop p);
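// Illustrative use (a sketch, not the exact call sites): the overflow stack
// absorbs pushes that a full work queue rejects, and is drained back into
// the queue once there is room again, e.g.
//
//   if (!work_queue()->push(obj)) {
//     push_on_overflow_stack(obj);  // spill locally instead of blocking
//   }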
ysr@1114 160
duke@435 161 // Is new_obj a candidate for the scan_partial_array_and_push_remainder method?
duke@435 162 inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
duke@435 163
duke@435 164 int* hash_seed() { return &_hash_seed; }
duke@435 165 int thread_num() { return _thread_num; }
duke@435 166
duke@435 167 // Allocate a to-space block of size "sz", or else return NULL.
duke@435 168 HeapWord* alloc_in_to_space_slow(size_t word_sz);
duke@435 169
duke@435 170 HeapWord* alloc_in_to_space(size_t word_sz) {
duke@435 171 HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
duke@435 172 if (obj != NULL) return obj;
duke@435 173 else return alloc_in_to_space_slow(word_sz);
duke@435 174 }
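// Note on the fast path above: allocate() just bumps a pointer inside the
// thread-local PLAB (_to_space_alloc_buffer). Only when that fails does
// alloc_in_to_space_slow() run, which typically retires the current PLAB
// and starts a new one, or allocates directly for oversized requests; this
// describes the usual PLAB design rather than a guarantee of this file.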
duke@435 175
duke@435 176 HeapWord* young_old_boundary() { return _young_old_boundary; }
duke@435 177
duke@435 178 void set_young_old_boundary(HeapWord *boundary) {
duke@435 179 _young_old_boundary = boundary;
duke@435 180 }
duke@435 181
duke@435 182 // Undo the most recent allocation ("obj", of "word_sz").
duke@435 183 void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
duke@435 184
ysr@1580 185 // Promotion failure stats
sla@5237 186 void register_promotion_failure(size_t sz) {
sla@5237 187 _promotion_failed_info.register_copy_failure(sz);
ysr@1580 188 }
sla@5237 189 PromotionFailedInfo& promotion_failed_info() {
sla@5237 190 return _promotion_failed_info;
sla@5237 191 }
sla@5237 192 bool promotion_failed() {
sla@5237 193 return _promotion_failed_info.has_failed();
sla@5237 194 }
sla@5237 195 void print_promotion_failure_size();
ysr@1580 196
jcoomes@2065 197 #if TASKQUEUE_STATS
jcoomes@2065 198 TaskQueueStats& taskqueue_stats() const { return _work_queue->stats; }
duke@435 199
jcoomes@2065 200 size_t term_attempts() const { return _term_attempts; }
jcoomes@2065 201 size_t overflow_refills() const { return _overflow_refills; }
jcoomes@2065 202 size_t overflow_refill_objs() const { return _overflow_refill_objs; }
jcoomes@2065 203
jcoomes@2065 204 void note_term_attempt() { ++_term_attempts; }
jcoomes@2065 205 void note_overflow_refill(size_t objs) {
jcoomes@2065 206 ++_overflow_refills; _overflow_refill_objs += objs;
duke@435 207 }
duke@435 208
jcoomes@2065 209 void reset_stats();
jcoomes@2065 210 #endif // TASKQUEUE_STATS
jcoomes@2065 211
duke@435 212 void start_strong_roots() {
duke@435 213 _start_strong_roots = os::elapsedTime();
duke@435 214 }
duke@435 215 void end_strong_roots() {
duke@435 216 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
duke@435 217 }
jcoomes@2065 218 double strong_roots_time() const { return _strong_roots_time; }
duke@435 219 void start_term_time() {
jcoomes@2065 220 TASKQUEUE_STATS_ONLY(note_term_attempt());
duke@435 221 _start_term = os::elapsedTime();
duke@435 222 }
duke@435 223 void end_term_time() {
duke@435 224 _term_time += (os::elapsedTime() - _start_term);
duke@435 225 }
jcoomes@2065 226 double term_time() const { return _term_time; }
duke@435 227
jcoomes@2065 228 double elapsed_time() const {
duke@435 229 return os::elapsedTime() - _start;
duke@435 230 }
duke@435 231 };
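// A rough usage sketch (illustrative only) of one worker's collection step:
//
//   state.start_strong_roots();
//   // ... scan roots, pushing reachable young oops on state.work_queue()
//   state.end_strong_roots();
//   state.evacuate_followers_closure().do_void();  // drain queue, steal work
//
// The closure accessors above exist so root scanning and reference
// processing can be parameterized with this thread's private state.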
duke@435 232
duke@435 233 class ParNewGenTask: public AbstractGangTask {
coleenp@548 234 private:
coleenp@548 235 ParNewGeneration* _gen;
coleenp@548 236 Generation* _next_gen;
coleenp@548 237 HeapWord* _young_old_boundary;
duke@435 238 class ParScanThreadStateSet* _state_set;
duke@435 239
duke@435 240 public:
duke@435 241 ParNewGenTask(ParNewGeneration* gen,
duke@435 242 Generation* next_gen,
duke@435 243 HeapWord* young_old_boundary,
duke@435 244 ParScanThreadStateSet* state_set);
duke@435 245
duke@435 246 HeapWord* young_old_boundary() { return _young_old_boundary; }
duke@435 247
jmasa@3357 248 void work(uint worker_id);
jmasa@3294 249
jmasa@3294 250 // Reset the terminator in ParScanThreadStateSet for
jmasa@3294 251 // "active_workers" threads.
jmasa@3294 252 virtual void set_for_termination(int active_workers);
duke@435 253 };
duke@435 254
duke@435 255 class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
coleenp@548 256 protected:
coleenp@548 257 template <class T> void do_oop_work(T* p);
duke@435 258 public:
duke@435 259 KeepAliveClosure(ScanWeakRefClosure* cl);
coleenp@548 260 virtual void do_oop(oop* p);
coleenp@548 261 virtual void do_oop(narrowOop* p);
duke@435 262 };
duke@435 263
duke@435 264 class EvacuateFollowersClosureGeneral: public VoidClosure {
coleenp@548 265 private:
coleenp@548 266 GenCollectedHeap* _gch;
coleenp@548 267 int _level;
coleenp@548 268 OopsInGenClosure* _scan_cur_or_nonheap;
coleenp@548 269 OopsInGenClosure* _scan_older;
coleenp@548 270 public:
coleenp@548 271 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
coleenp@548 272 OopsInGenClosure* cur,
coleenp@548 273 OopsInGenClosure* older);
coleenp@548 274 virtual void do_void();
duke@435 275 };
duke@435 276
duke@435 277 // Closure for scanning ParNewGeneration.
duke@435 278 // Same as ScanClosure, except it performs the parallel GC barrier.
duke@435 279 class ScanClosureWithParBarrier: public ScanClosure {
coleenp@548 280 protected:
coleenp@548 281 template <class T> void do_oop_work(T* p);
coleenp@548 282 public:
duke@435 283 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
coleenp@548 284 virtual void do_oop(oop* p);
coleenp@548 285 virtual void do_oop(narrowOop* p);
duke@435 286 };
duke@435 287
duke@435 288 // Implements AbstractRefProcTaskExecutor for ParNew.
duke@435 289 class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
coleenp@548 290 private:
coleenp@548 291 ParNewGeneration& _generation;
coleenp@548 292 ParScanThreadStateSet& _state_set;
coleenp@548 293 public:
duke@435 294 ParNewRefProcTaskExecutor(ParNewGeneration& generation,
duke@435 295 ParScanThreadStateSet& state_set)
duke@435 296 : _generation(generation), _state_set(state_set)
duke@435 297 { }
duke@435 298
duke@435 299 // Executes a task using worker threads.
duke@435 300 virtual void execute(ProcessTask& task);
duke@435 301 virtual void execute(EnqueueTask& task);
duke@435 302 // Switch to single threaded mode.
duke@435 303 virtual void set_single_threaded_mode();
duke@435 304 };
duke@435 305
duke@435 306
duke@435 307 // A Generation that does parallel young-gen collection.
duke@435 308
duke@435 309 class ParNewGeneration: public DefNewGeneration {
duke@435 310 friend class ParNewGenTask;
duke@435 311 friend class ParNewRefProcTask;
duke@435 312 friend class ParNewRefProcTaskExecutor;
duke@435 313 friend class ParScanThreadStateSet;
ysr@969 314 friend class ParEvacuateFollowersClosure;
duke@435 315
coleenp@548 316 private:
ysr@1130 317 // The per-worker-thread work queues
duke@435 318 ObjToScanQueueSet* _task_queues;
duke@435 319
ysr@1130 320 // Per-worker-thread local overflow stacks
zgu@3900 321 Stack<oop, mtGC>* _overflow_stacks;
ysr@1130 322
duke@435 323 // Desired size of survivor space plab's
duke@435 324 PLABStats _plab_stats;
duke@435 325
duke@435 326 // A list of from-space images of to-be-scanned objects, threaded through
duke@435 327 // klass-pointers (klass information already copied to the forwarded
duke@435 328 // image.) Manipulated with CAS.
duke@435 329 oop _overflow_list;
ysr@969 330 NOT_PRODUCT(ssize_t _num_par_pushes;)
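// A hedged sketch of the CAS push mentioned above (the real code in the
// .cpp file also handles contention details such as a busy marker):
//
//   oop cur;
//   do {
//     cur = _overflow_list;
//     from_space_obj->set_klass_to_list_ptr(cur);  // thread list via klass word
//   } while (Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur) != cur);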
duke@435 331
duke@435 332 // If true, older generation does not support promotion undo, so avoid.
duke@435 333 static bool _avoid_promotion_undo;
duke@435 334
duke@435 335 // This closure is used by the reference processor to filter out
duke@435 336 // references to live referents.
duke@435 337 DefNewGeneration::IsAliveClosure _is_alive_closure;
duke@435 338
duke@435 339 static oop real_forwardee_slow(oop obj);
duke@435 340 static void waste_some_time();
duke@435 341
duke@435 342 // Preserve the mark of "obj", if necessary, in preparation for its mark
duke@435 343 // word being overwritten with a self-forwarding-pointer.
duke@435 344 void preserve_mark_if_necessary(oop obj, markOop m);
duke@435 345
sla@5237 346 void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
sla@5237 347
duke@435 348 protected:
duke@435 349
duke@435 350 bool _survivor_overflow;
duke@435 351
duke@435 352 bool avoid_promotion_undo() { return _avoid_promotion_undo; }
duke@435 353 void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
duke@435 354
duke@435 355 bool survivor_overflow() { return _survivor_overflow; }
duke@435 356 void set_survivor_overflow(bool v) { _survivor_overflow = v; }
duke@435 357
coleenp@548 358 public:
duke@435 359 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
duke@435 360
duke@435 361 ~ParNewGeneration() {
duke@435 362 for (uint i = 0; i < ParallelGCThreads; i++)
duke@435 363 delete _task_queues->queue(i);
duke@435 364
duke@435 365 delete _task_queues;
duke@435 366 }
duke@435 367
duke@435 368 virtual void ref_processor_init();
duke@435 369 virtual Generation::Name kind() { return Generation::ParNew; }
duke@435 370 virtual const char* name() const;
duke@435 371 virtual const char* short_name() const { return "ParNew"; }
duke@435 372
duke@435 373 // override
duke@435 374 virtual bool refs_discovery_is_mt() const {
duke@435 375 assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
duke@435 376 return ParallelGCThreads > 1;
duke@435 377 }
duke@435 378
duke@435 379 // Make the collection virtual.
duke@435 380 virtual void collect(bool full,
duke@435 381 bool clear_all_soft_refs,
duke@435 382 size_t size,
duke@435 383 bool is_tlab);
duke@435 384
duke@435 385 // This needs to be visible to the closure function.
duke@435 386 // "obj" is the object to be copied, "m" is a recent value of its mark
duke@435 387 // that must not contain a forwarding pointer (though one might be
duke@435 388 // inserted in "obj"s mark word by a parallel thread).
duke@435 389 inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
duke@435 390 oop obj, size_t obj_sz, markOop m) {
duke@435 391 if (_avoid_promotion_undo) {
duke@435 392 return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
duke@435 393 obj, obj_sz, m);
duke@435 394 }
duke@435 395
duke@435 396 return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
duke@435 397 }
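// Note (hedged): _avoid_promotion_undo is a static, per-configuration flag
// (see its declaration above), so the branch in copy_to_survivor_space is
// effectively constant at run time. The "_with_undo" variant copies first
// and undoes the allocation if it loses the forwarding race; the
// "_avoiding_promotion_undo" variant is for old generations that cannot
// take a promoted object back.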
duke@435 398
duke@435 399 oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
duke@435 400 oop obj, size_t obj_sz, markOop m);
duke@435 401
duke@435 402 oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
duke@435 403 oop obj, size_t obj_sz, markOop m);
duke@435 404
ysr@969 405 // in support of testing overflow code
ysr@969 406 NOT_PRODUCT(int _overflow_counter;)
ysr@969 407 NOT_PRODUCT(bool should_simulate_overflow();)
ysr@969 408
ysr@1114 409 // Accessor for overflow list
ysr@1114 410 oop overflow_list() { return _overflow_list; }
ysr@1114 411
duke@435 412 // Push the given (from-space) object on the global overflow list.
ysr@969 413 void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
duke@435 414
duke@435 415 // If the global overflow list is non-empty, move some tasks from it
ysr@1114 416 // onto "work_q" (which need not be empty). No more than 1/4 of the
ysr@1114 417 // available space on "work_q" is used.
duke@435 418 bool take_from_overflow_list(ParScanThreadState* par_scan_state);
ysr@1114 419 bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
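// Worked example of the "1/4 of the available space" bound above: if
// work_q can hold 1024 entries and currently holds 224, the available
// space is 800 entries, so at most 200 objects are moved from the overflow
// list in one call, leaving headroom for the children those objects will
// push while being scanned.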
duke@435 420
duke@435 421 // The task queues to be used by parallel GC threads.
duke@435 422 ObjToScanQueueSet* task_queues() {
duke@435 423 return _task_queues;
duke@435 424 }
duke@435 425
duke@435 426 PLABStats* plab_stats() {
duke@435 427 return &_plab_stats;
duke@435 428 }
duke@435 429
duke@435 430 size_t desired_plab_sz() {
duke@435 431 return _plab_stats.desired_plab_sz();
duke@435 432 }
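// A hedged note: PLABStats aggregates each collection's PLAB allocation and
// waste and recomputes desired_plab_sz() from that history, so survivor
// buffers adapt to how much actually survives; the exact resizing policy
// lives with PLABStats in parGCAllocBuffer.hpp.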
duke@435 433
duke@435 434 static oop real_forwardee(oop obj);
duke@435 435
duke@435 436 DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
duke@435 437 };
stefank@2314 438
stefank@2314 439 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
