Mon, 06 Aug 2012 12:20:14 -0700
6818524: G1: use ergonomic resizing of PLABs
Summary: Employ PLABStats instances to record information about survivor and old PLABs, and use the recorded stats to adjust the sizes of survivor and old PLABs.
Reviewed-by: johnc, ysr
Contributed-by: Brandon Mitchell <brandon@twitter.com>
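
The mechanism, in brief: each ParGCAllocBuffer flushes its allocation and waste counts into a shared PLABStats at the end of GC, and PLABStats::adjust_desired_plab_sz() folds those counts into a decaying average that becomes the next desired PLAB size. A minimal sketch of that feedback step (illustrative names and simplified rounding; per-worker and refill-count scaling omitted; only YoungPLABSize and PLABWeight are real HotSpot flags):

    // Sketch only, not the exact HotSpot code.
    static size_t next_desired_plab_sz(size_t allocated_words, // words handed out from PLABs
                                       size_t wasted_words,    // words retired unused
                                       size_t cur_desired_sz,  // current desired PLAB size
                                       unsigned weight) {      // decay weight, e.g. PLABWeight (0..100)
      size_t used = allocated_words - wasted_words;
      // Exponentially decaying average: new = (1 - w) * old + w * observed.
      return (cur_desired_sz * (100 - weight) + used * weight) / 100;
    }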
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}
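
// Object arrays longer than ParGCArrayScanChunk are evacuated and scanned
// in chunks rather than all at once. The new_obj != old_obj test excludes
// arrays that were self-forwarded by a promotion failure: with only one
// copy, the length field cannot double as the scan-progress index.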
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
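
// Drain the local work queue until at most max_size entries remain, scanning
// each popped object with the closure matching its location (to-space vs.
// old gen). Partially scanned big arrays re-queue their own remainders.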
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
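
// Slow path of to-space allocation, reached when the inline fast path (in
// the header file) cannot satisfy the request from the current PLAB; hence
// the "Otherwise" in the comment below.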
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
        _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
      ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                         overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in? There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

// The "i" passed to this method is the part of the work for
// this thread. It is not the worker ID. The "i" is derived
// from _started_workers, which is incremented in internal_note_start()
// called in the GangWorker loop(); that increment happens under the
// protection of the gang monitor, after a task is started. So "i"
// is based on first-come-first-served.

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
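  // Per the summary above: generation-wide PLAB statistics, seeded with the
  // initial YoungPLABSize and decayed with weight PLABWeight. Workers flush
  // their to-space PLAB allocation/waste into these stats (see
  // ParScanThreadStateSet::flush()), and adjust_desired_plab_sz() later
  // feeds the result back as the next desired PLAB size.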
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

bool ParNewGeneration::_avoid_promotion_undo = false;
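
// When CMS is the old generation, collect() sets _avoid_promotion_undo so
// that the claim-first path (copy_to_survivor_space_avoiding_promotion_undo)
// is used: a lost promotion race is then resolved before copying, instead of
// copying first and undoing the promotion afterwards.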

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                   workers->active_workers(),
                                   Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true);  // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
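  // Ergonomic PLAB resizing (the point of this change): fold the
  // allocation/waste numbers that the workers flushed into _plab_stats
  // back into the desired PLAB size, so the next minor GC starts with
  // survivor PLABs sized to observed demand.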
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need a monotonically non-decreasing time in ms, or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}
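
// Crude bounded spin used while waiting for another thread to replace an
// interim (claimed) forwarding pointer with the real one; the global sum
// keeps the compiler from optimizing the loop away.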
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object. If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation. This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object. If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer. The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;  // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
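// BUSY is a sentinel distinguishable from NULL and from any real oop: a
// thread installs it while detaching a prefix of the global overflow list,
// letting other threads tell "empty" apart from "momentarily claimed".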
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list. Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != BUSY, "Error");
    oop suffix = oop(cur->klass());    // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);  // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = oop(last->klass());
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = oop(cur->klass_or_null());
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL,                       // is_alive_non_header
                             false);                     // write barrier for next field updates
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}

bool ParNewGeneration::in_use() {
  return UseParNewGC && ParallelGCThreads > 0;
}