Sat, 23 Oct 2010 23:03:49 -0700
6896603: CMS/GCH: collection_attempt_is_safe() ergo should use more recent data
Summary: Deprecated HandlePromotionFailure, removing the ability to turn off that feature; did away with the one-epoch look-ahead when deciding whether a scavenge is likely to fail, relying instead on current data.
Reviewed-by: jmasa, johnc, poonam
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parNewGeneration.cpp.incl"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
#if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
#endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif
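
// Record the bounds of a to-space (survivor) PLAB with the data recorder
// supplied by the old generation, if any; the recorded samples allow the
// survivor space to be subdivided along PLAB boundaries later.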
void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}
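
// Large object arrays are scanned incrementally, ParGCArrayScanChunk
// elements at a time: the from-space copy's length field is used as a
// cursor recording how far scanning has progressed (see
// scan_partial_array_and_push_remainder() below and the chunking code in
// the copy_to_survivor_space_* methods).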
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
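
// Scan objects popped off the local work queue until its size drops to
// max_size; then, if enabled, refill the queue from this thread's private
// overflow stack and repeat.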
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0; // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
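
// Slow path of to-space allocation: the current PLAB could not satisfy the
// request, so either retire it and grab a fresh one (for small requests) or
// allocate the object directly in to-space (for large ones).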
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
        _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop>*             overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void reset(bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::reset(bool promotion_failed)
{
  _term.reset_for_reuse();
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %  "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}
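
// The heart of parallel evacuation: repeatedly drain the local work queue,
// then try to steal from other threads' queues or refill from the global
// overflow list; when no work can be found anywhere, offer termination.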
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work; attempt to steal from other threads' queues.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, go around the loop again.
      continue;

      // Otherwise, try the global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(_generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
  ParScanThreadStateSet thread_state_set(workers->total_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  int n_workers = workers->total_workers();
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  update_time_of_last_gc(os::javaTimeMillis());

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}
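
// A trivial delay loop used by real_forwardee_slow() below while spinning
// on a claimed-but-not-yet-written forwarding pointer.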
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif
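
// Preserve the mark word of an object whose promotion failed, but only if
// the mark is non-trivial (not the prototype mark or, with biased locking,
// the biased-locking prototype), since trivial marks need not be saved to
// be restored. The lock serializes updates to the shared preserved-marks
// data structures in DefNewGeneration.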
void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if ((m != markOopDesc::prototype()) &&
      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
    MutexLocker ml(ParGCRareEvent_lock);
    DefNewGeneration::preserve_mark_if_necessary(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a "claimed" forwarding pointer (the
    // ClaimedForwardPtr sentinel) atomically, to claim the right to
    // install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != BUSY, "Error");
    oop suffix = oop(cur->klass());       // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);     // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = oop(last->klass());
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = oop(cur->klass_or_null());
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _reserved,                  // span
        refs_discovery_is_atomic(), // atomic_discovery
        refs_discovery_is_mt(),     // mt_discovery
        NULL,                       // is_alive_non_header
        ParallelGCThreads,
        ParallelRefProcEnabled);
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}

bool ParNewGeneration::in_use() {
  return UseParNewGC && ParallelGCThreads > 0;
}