/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
  #if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
  #endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

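// Large object arrays are scanned in ParGCArrayScanChunk-sized chunks so
// that a single huge array cannot serialize the parallel phase. The length
// field of the old (from-space) copy is reused as a cursor recording how far
// the array has been scanned; the true length lives in the new copy, which
// real_forwardee() yields. See also the block comment preceding
// push_on_overflow_list() below for the interaction with compressed oops.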
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start = arrayOop(old)->length();
  int end = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}


void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

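// Refill the work queue from this thread's private overflow stack
// (ParGCUseLocalOverflow). The transfer is throttled below to the smallest
// of a quarter of the queue's remaining capacity,
// ParGCDesiredObjsFromOverflowList, and whatever the stack holds, so a
// single refill cannot immediately re-overflow the queue.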
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

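// Slow-path to-space allocation, entered once the current PLAB cannot
// satisfy a request. If the request is small relative to the PLAB (the
// ParallelGCBufferWastePct test below), the mostly-full buffer is retired
// and a fresh one is carved out of to-space; otherwise the object is
// allocated directly. As a worked example (assuming the default
// ParallelGCBufferWastePct of 10): with a 4096-word PLAB, the test
//   word_sz * 100 < 10 * 4096
// retires the buffer for requests under roughly 410 words, while larger
// requests fall through to a direct par_allocate() in to-space.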
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // The fast-path PLAB allocation (in alloc_in_to_space()) has failed.
  // If the object is small enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space* const sp = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}


void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

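// The per-thread states are allocated together in a ResourceArray and
// constructed with placement new (see the constructor below), so the set
// must outlive the collection; flush() merges each thread's results back
// into the generation at the end.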
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
      ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                         overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms      %   "
                   "    ms      %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table. We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

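// The heart of the parallel phase: each worker alternates between draining
// its own queue (trim_queues), stealing from other workers' queues, and
// pulling from the global overflow list, and only offers termination once
// all three sources are empty. offer_termination() returns true only once
// every worker has done the same, i.e. no work is left anywhere.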
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in? There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &par_scan_state.to_space_root_closure(),
                                true,  // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure(),
                                &klass_scan_closure);
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

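// The constructor sets up one work queue per GC thread and, when
// ParGCUseLocalOverflow is set, one private overflow stack per thread;
// otherwise overflow goes through the shared, CAS-linked _overflow_list.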
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

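// Young-gen pointer scan with a parallel-safe card-table barrier: the
// referent is copied (or its existing forwardee reused), the slot is
// updated, and, when _gc_barrier is set and the slot still points below the
// generation boundary, the covering card is dirtied via
// write_ref_field_gc_par().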
template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};


void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single-threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

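// One young collection, end to end: choose the worker count, verify the
// collection is safe (enough old-gen headroom for worst-case promotion),
// run ParNewGenTask over the strong roots, process discovered references,
// then either swap the survivor spaces on success or unwind forwarding
// pointers via handle_promotion_failed().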
void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
    AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                            workers->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer);
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // because os::javaTimeMillis() does not guarantee monotonicity,
  // and using it would produce time-warp warnings.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

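// Crude backoff used while spinning in real_forwardee_slow() below: burn a
// few cycles before re-reading a forwarding pointer another thread is about
// to install. The file-static 'sum' keeps the loop from being optimized
// away entirely.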
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits,
// if necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

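// The claim protocol, in order: in the tenured-promotion path below, a
// thread first CASes the ClaimedForwardPtr sentinel into the mark word to
// claim the object; losers call real_forwardee(), which spins until the
// winner installs the real pointer. In the to-space path, the copy is made
// first and the real forwarding pointer is CASed in directly.
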
// Multiple GC threads may try to promote an object. If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation. This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
    ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded. That might not be the case here. It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below. That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread. So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote;
    // try allocating obj tenured.

    // Attempt to install the ClaimedForwardPtr sentinel (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           is_in_reserved(new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

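// This copy-first variant runs when promotion undo is permitted, i.e. when
// _avoid_promotion_undo is false (collect() above sets it true only for a
// CMS old generation): the object is copied optimistically and the copy is
// discarded if another thread wins the forwarding-pointer CAS.
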
|
1281 // Multiple GC threads may try to promote the same object. If two |
|
1282 // or more GC threads copy the object, only one wins the race to install |
|
1283 // the forwarding pointer. The other threads have to undo their copy. |
|
1284 |
|
1285 oop ParNewGeneration::copy_to_survivor_space_with_undo( |
|
1286 ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) { |
|
1287 |
|
1288 // In the sequential version, this assert also says that the object is |
|
1289 // not forwarded. That might not be the case here. It is the case that |
|
1290 // the caller observed it to be not forwarded at some time in the past. |
|
1291 assert(is_in_reserved(old), "shouldn't be scavenging this oop"); |
|
1292 |
|
1293 // The sequential code read "old->age()" below. That doesn't work here, |
|
1294 // since the age is in the mark word, and that might be overwritten with |
|
1295 // a forwarding pointer by a parallel thread. So we must save the mark |
|
1296 // word here, install it in a local oopDesc, and then analyze it. |
|
1297 oopDesc dummyOld; |
|
1298 dummyOld.set_mark(m); |
|
1299 assert(!dummyOld.is_forwarded(), |
|
1300 "should not be called with forwarding pointer mark word."); |
|
1301 |
|
1302 bool failed_to_promote = false; |
|
1303 oop new_obj = NULL; |
|
1304 oop forward_ptr; |
|
1305 |
|
1306 // Try allocating obj in to-space (unless too old) |
|
1307 if (dummyOld.age() < tenuring_threshold()) { |
|
1308 new_obj = (oop)par_scan_state->alloc_in_to_space(sz); |
|
1309 if (new_obj == NULL) { |
|
1310 set_survivor_overflow(true); |
|
1311 } |
|
1312 } |
|
1313 |
|
1314 if (new_obj == NULL) { |
|
1315 // Either to-space is full or we decided to promote |
|
1316 // try allocating obj tenured |
|
1317 new_obj = _next_gen->par_promote(par_scan_state->thread_num(), |
|
1318 old, m, sz); |
|
1319 |
|
1320 if (new_obj == NULL) { |
|
1321 // promotion failed, forward to self |
|
1322 forward_ptr = old->forward_to_atomic(old); |
|
1323 new_obj = old; |
|
1324 |
|
1325 if (forward_ptr != NULL) { |
|
1326 return forward_ptr; // someone else succeeded |
|
1327 } |
|
1328 |
|
1329 _promotion_failed = true; |
|
1330 failed_to_promote = true; |
|
1331 |
|
1332 preserve_mark_if_necessary(old, m); |
|
1333 par_scan_state->register_promotion_failure(sz); |
|
1334 } |
|
1335 } else { |
|
1336 // Is in to-space; do copying ourselves. |
|
1337 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); |
|
1338 // Restore the mark word copied above. |
|
1339 new_obj->set_mark(m); |
|
1340 // Increment age if new_obj still in new generation |
|
1341 new_obj->incr_age(); |
|
1342 par_scan_state->age_table()->add(new_obj, sz); |
|
1343 } |
|
1344 assert(new_obj != NULL, "just checking"); |
|
1345 |
|
1346 #ifndef PRODUCT |
|
1347 // This code must come after the CAS test, or it will print incorrect |
|
1348 // information. |
|
1349 if (TraceScavenge) { |
|
1350 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", |
|
1351 is_in_reserved(new_obj) ? "copying" : "tenuring", |
|
1352 new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size()); |
|
1353 } |
|
1354 #endif |
|
1355 |
|
1356 // Now attempt to install the forwarding pointer (atomically). |
|
1357 // We have to copy the mark word before overwriting with forwarding |
|
1358 // ptr, so we can restore it below in the copy. |
|
1359 if (!failed_to_promote) { |
|
1360 forward_ptr = old->forward_to_atomic(new_obj); |
|
1361 } |
|
1362 |
|
1363 if (forward_ptr == NULL) { |
|
1364 oop obj_to_push = new_obj; |
|
1365 if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) { |
|
1366 // Length field used as index of next element to be scanned. |
|
1367 // Real length can be obtained from real_forwardee() |
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops. Someone beat us to it. Undo the allocation. Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK; in fact it's
// probably good, as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32 bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
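// BUSY is a sentinel bit pattern, distinguishable from NULL and from any
// real oop, that marks the global overflow list as claimed while some
// worker is trimming a prefix off of it.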
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // In the case of compressed oops, we use a private, not-shared
    // overflow stack.
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // If the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list. Instead we have
    // to allocate an oopDesc in the C-heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc_ptr(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
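    // Lock-free push onto the global overflow list: thread our node's
    // klass word to the observed head (or to NULL if the list is BUSY),
    // then try to CAS ourselves in as the new head, retrying for as long
    // as the head keeps changing under us.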
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because of the common code, if you make any changes in
// the code below, please check the CMS version to see if
// similar changes might be needed.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);
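  // (i.e., at most a quarter of the work queue's remaining capacity,
  // capped by ParGCDesiredObjsFromOverflowList)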

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
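  // The xchg atomically claims the whole list; it returns the prior head,
  // which is NULL if the list emptied in the meantime, or BUSY if another
  // worker already holds the claim.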
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = (size_t)ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // someone grabbed it before we did ...
    // ... we spin for a short while...
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // nothing left to take
      return false;
    } else if (_overflow_list != BUSY) {
      // try and grab the prefix
      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take or waited long enough
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
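  // Walk off a prefix of at most objsFromOverflow objects; the chain is
  // threaded through the klass word (see set_klass_to_list_ptr() and
  // list_ptr_from_klass()), and a NULL klass word marks the end of the list.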
  size_t i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = cur->list_ptr_from_klass();
  }

  // Reattach remaining (suffix) to overflow list
  if (cur->klass_or_null() == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
    }
  } else {
    assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
    oop suffix = cur->list_ptr_from_klass(); // suffix will be put back on global list
    cur->set_klass_to_list_ptr(NULL);        // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else cur_overflow_list = observed_overflow_list;
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of suffix list
      oop last = suffix;
      while (last->klass_or_null() != NULL) {
        last = last->list_ptr_from_klass();
      }
      // Atomically prepend suffix to current overflow list
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the list pointer
    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow coincident
      // with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             (int) ParallelGCThreads,    // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             (int) ParallelGCThreads,    // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}