Tue, 13 Apr 2010 13:52:10 -0700
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
Summary: Ensure a full GC that clears SoftReferences is performed before throwing an out-of-memory error.
Reviewed-by: ysr, jcoomes

/*
 * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parNewGeneration.cpp.incl"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       GrowableArray<oop>** overflow_stack_set_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stack_set_[thread_num_]),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _promotion_failure_size(0),
  _pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}
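
// Large object arrays are evacuated and scanned in chunks rather than all at
// once: an objArray qualifies for partial scanning when its copy is longer
// than ParGCArrayScanChunk and is distinct from the original (i.e., it was
// not self-forwarded after a promotion failure). The original's length field
// is then reused to track scan progress; see
// scan_partial_array_and_push_remainder() below.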
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
    note_push();
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
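
// Drain the local work queue down to max_size entries, scanning each popped
// object with the closure appropriate to the space it now resides in, and
// (when using private overflow stacks) eagerly refill the queue from that
// stack so overflowed work becomes stealable early.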
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        note_pop();
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}
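
// Move a batch of objects from the private overflow stack to the work queue.
// The batch size is capped both by ParGCDesiredObjsFromOverflowList and by a
// quarter of the queue's remaining capacity, so the pushes below cannot fail.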
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  GrowableArray<oop>* of_stack = overflow_stack();
  uint num_overflow_elems = of_stack->length();
  uint num_take_elems     = MIN2(MIN2((queue->max_elems() - queue->size())/4,
                                      (juint)ParGCDesiredObjsFromOverflowList),
                                 num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}
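
// Slow-path to-space allocation, used when the current PLAB cannot satisfy
// the request. If the request is small relative to the PLAB (the waste would
// stay within ParallelGCBufferWastePct), retire the old buffer and carve a
// new one out of to-space, shrinking the new buffer to whatever to-space can
// still supply; otherwise allocate the object directly from to-space.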
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_and_clear_promotion_failure_size() {
  if (_promotion_failure_size != 0) {
    if (PrintPromotionFailure) {
      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                          _thread_num, _promotion_failure_size);
    }
    _promotion_failure_size = 0;
  }
}
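
// A ParScanThreadStateSet holds the per-worker scan states for one young-gen
// collection; the states are placement-constructed into a ResourceArray so
// they all live in a single resource-area allocation.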
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        GrowableArray<oop>**    overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);
  inline ParScanThreadState& thread_state(int i);
  int pushes() { return _pushes; }
  int pops()   { return _pops; }
  int steals() { return _steals; }
  void reset(bool promotion_failed);
  void flush();
private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
  // statistics
  int _pushes;
  int _pops;
  int _steals;
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  GrowableArray<oop>** overflow_stack_set_,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term),
    _pushes(0), _pops(0), _steals(0)
{
  assert(num_threads > 0, "sanity check!");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
      ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                         overflow_stack_set_, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::reset(bool promotion_failed)
{
  _term.reset_for_reuse();
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_and_clear_promotion_failure_size();
    }
  }
}

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. If heavy-weight work is ever added to this
  // loop, consider parallelizing it across the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);

    // Flush stats related to work queue activity (push/pop/steal).
    // This could conceivably become a bottleneck; if so, we'll put the
    // stats gathering under the flag.
    if (PAR_STATS_ENABLED) {
      _pushes += par_scan_state.pushes();
      _pops   += par_scan_state.pops();
      _steals += par_scan_state.steals();
      if (ParallelGCVerbose) {
        gclog_or_tty->print("Thread %d complete:\n"
                            "  Pushes: %7d    Pops: %7d    Steals %7d (in %d attempts)\n",
                            i, par_scan_state.pushes(), par_scan_state.pops(),
                            par_scan_state.steals(), par_scan_state.steal_attempts());
        if (par_scan_state.overflow_pushes() > 0 ||
            par_scan_state.overflow_refills() > 0) {
          gclog_or_tty->print("  Overflow pushes: %7d    "
                              "Overflow refills: %7d for %d objs.\n",
                              par_scan_state.overflow_pushes(),
                              par_scan_state.overflow_refills(),
                              par_scan_state.overflow_refill_objs());
        }

        double elapsed = par_scan_state.elapsed();
        double strong_roots = par_scan_state.strong_roots_time();
        double term = par_scan_state.term_time();
        gclog_or_tty->print(
          "  Elapsed: %7.2f ms.\n"
          "    Strong roots: %7.2f ms (%6.2f%%)\n"
          "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
          elapsed * 1000.0,
          strong_roots * 1000.0, (strong_roots*100.0/elapsed),
          term * 1000.0, (term*100.0/elapsed),
          par_scan_state.term_attempts());
      }
    }
  }
  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}
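
// The do_oop variants below differ only in the two boolean flags passed to
// ParScanClosure::do_oop_work; judging by the closure names, the first flag
// selects whether a GC (card) barrier is applied and the second whether the
// closure is used for root scanning.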
ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786)  /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    par_scan_state()->note_steal_attempt();
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      par_scan_state()->note_steal();
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");
      par_scan_state()->note_push();
      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}
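
// ParNewGenTask runs the strong-root scan plus the evacuate-followers loop
// on each GC worker; work(i) below is invoked once per worker thread i.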
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure());
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueuePadded *q_padded = new ObjToScanQueuePadded();
    guarantee(q_padded != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, &q_padded->work_queue);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray<oop>*, ParallelGCThreads);
  guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure");
  for (uint i = 0; i < ParallelGCThreads; i++) {
    if (ParGCUseLocalOverflow) {
      _overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
      guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure.");
    } else {
      _overflow_stacks[i] = NULL;
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif
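
// The keep-alive closures below are used during reference processing. Each
// applies the wrapped scan closure to a referent slot and then, if the slot
// itself lies in the heap, records it with the remembered set via
// write_ref_field_gc_par, since the slot may now hold a moved object.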
// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(_generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.
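// Outline of the phases below: verify that a scavenge is safe, clear the age
// table and to-space, run ParNewGenTask across the GC workers (strong-root
// scanning plus the evacuate-followers loop), process discovered references,
// then swap survivor spaces on success or undo forwarding pointers on
// promotion failure, and finally update PLAB and size-policy statistics and
// enqueue the discovered references.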
void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
804 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
806 init_assuming_no_promotion_failure();
808 if (UseAdaptiveSizePolicy) {
809 set_survivor_overflow(false);
810 size_policy->minor_collection_begin();
811 }
813 TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
814 // Capture heap used before collection (for printing).
815 size_t gch_prev_used = gch->used();
817 SpecializationStats::clear();
819 age_table()->clear();
820 to()->clear(SpaceDecorator::Mangle);
822 gch->save_marks();
823 assert(workers != NULL, "Need parallel worker threads.");
824 ParallelTaskTerminator _term(workers->total_workers(), task_queues());
825 ParScanThreadStateSet thread_state_set(workers->total_workers(),
826 *to(), *this, *_next_gen, *task_queues(),
827 _overflow_stacks, desired_plab_sz(), _term);
829 ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
830 int n_workers = workers->total_workers();
831 gch->set_par_threads(n_workers);
832 gch->rem_set()->prepare_for_younger_refs_iterate(true);
833 // It turns out that even when we're using 1 thread, doing the work in a
834 // separate thread causes wide variance in run times. We can't help this
835 // in the multi-threaded case, but we special-case n=1 here to get
836 // repeatable measurements of the 1-thread overhead of the parallel code.
837 if (n_workers > 1) {
838 GenCollectedHeap::StrongRootsScope srs(gch);
839 workers->run_task(&tsk);
840 } else {
841 GenCollectedHeap::StrongRootsScope srs(gch);
842 tsk.work(0);
843 }
844 thread_state_set.reset(promotion_failed());
846 if (PAR_STATS_ENABLED && ParallelGCVerbose) {
847 gclog_or_tty->print("Thread totals:\n"
848 " Pushes: %7d Pops: %7d Steals %7d (sum = %7d).\n",
849 thread_state_set.pushes(), thread_state_set.pops(),
850 thread_state_set.steals(),
851 thread_state_set.pops()+thread_state_set.steals());
852 }
853 assert(thread_state_set.pushes() == thread_state_set.pops()
854 + thread_state_set.steals(),
855 "Or else the queues are leaky.");

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &evacuate_followers, NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count, which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(HandlePromotionFailure,
           "Should only be here if promotion failure handling is on");
    if (_promo_failure_scan_stack != NULL) {
      // Can be non-null because of reference processing.
      // Free stack with its elements.
      delete _promo_failure_scan_stack;
      _promo_failure_scan_stack = NULL;
    }
    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();
    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  update_time_of_last_gc(os::javaTimeMillis());

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
}
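
// Spin-wait helper used by real_forwardee_slow() below: burns a few cycles
// between re-reads of a claimed-but-not-yet-written forwarding pointer. The
// global 'sum' presumably exists only so the loop has an observable effect
// and is not optimized away.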
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times when an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer; it waits,
// if necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if ((m != markOopDesc::prototype()) &&
      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
    MutexLocker ml(ParGCRareEvent_lock);
    DefNewGeneration::preserve_mark_if_necessary(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      par_scan_state->note_overflow_push();
    }
    par_scan_state->note_push();

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
        // flag is incorrectly set. In any case, it's seriously wrong to be
        // here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
      // Log the size of the maiden promotion failure
      par_scan_state->log_promotion_failure(sz);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      par_scan_state->note_overflow_push();
    }
    par_scan_state->note_push();

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below).  As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case.  Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers).  For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy.  If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (oop(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // if the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-Heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);

  assert(par_scan_state->overflow_stack() == NULL, "Error");
  assert(!UseCompressedOops, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != BUSY, "Error");
    oop suffix = oop(cur->klass());       // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);     // break off suffix
    // It's possible that the list is still in the empty(busy) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = oop(last->klass());
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = oop(cur->klass_or_null());
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded.  In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  par_scan_state->note_overflow_refill(n);
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _reserved,                  // span
        refs_discovery_is_atomic(), // atomic_discovery
        refs_discovery_is_mt(),     // mt_discovery
        NULL,                       // is_alive_non_header
        ParallelGCThreads,
        ParallelRefProcEnabled);
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}