Tue, 14 Jan 2014 16:40:33 +0100
8032379: Remove the is_scavenging flag to process_strong_roots
Summary: Refactor the strong root processing to avoid using a boolean in addition to the ScanOption enum.
Reviewed-by: stefank, tschatzl, ehelin, jmasa
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
      (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}
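
// Decide whether a just-copied object should be scanned incrementally:
// only a forwarded object array longer than ParGCArrayScanChunk
// qualifies. Such arrays are processed a chunk at a time (see
// scan_partial_array_and_push_remainder below) to bound the work done
// per queue entry.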
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
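
// Drain this thread's work queue until at most max_size entries remain,
// scanning each popped object with the closure that matches where its
// copy lives (to-space vs. old generation).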
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early.  Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}
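
// Move a bounded number of the most recently pushed entries from this
// thread's private overflow stack onto its work queue, where they
// become visible to work stealing by other threads.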
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
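
// Slow-path allocation in to-space, called when the current PLAB cannot
// satisfy the request: if the request is small relative to the PLAB
// size, retire the buffer and try to start a new one (shrinking it to
// whatever contiguous space to-space can still supply); large requests
// are allocated directly in to-space.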
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while(buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}
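
// A resource-allocated set of per-worker ParScanThreadState instances,
// together with the terminator shared by the worker threads during the
// termination protocol.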
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
 public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling.  Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts.  While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
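
// The per-thread evacuation loop: repeatedly drain the local work
// queue, then try to steal from other threads' queues, then refill from
// the global overflow list, and finally offer termination once no work
// can be found anywhere.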
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in?  There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                SharedHeap::ScanningOption(so),
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure(),
                                &klass_scan_closure);
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot
    // deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
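
// Like ScanClosure::do_oop_work(), but uses the parallel variant of the
// card-marking write barrier when the reference still points into a
// younger generation.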
template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                   workers->active_workers(),
                                   Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, gc_tracer.gc_id());
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms, or we
  // will see time-warp warnings; os::javaTimeMillis() does not
  // guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
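
// A crude spin delay: burn a few cycles while waiting for another
// thread to overwrite an interim forwarding pointer with the real one
// (see real_forwardee_slow below).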
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}
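
// The "claimed" sentinel: 0x4 is never a valid object address, so a
// forwarding pointer holding this value is unambiguous.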
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits,
// if necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}
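
// Refill the calling thread's work queue from overflow storage: from
// its private overflow stack when ParGCUseLocalOverflow, otherwise from
// the shared global overflow list.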
bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}


// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass();  // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);         // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded.  In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}