Thu, 11 Dec 2008 12:05:08 -0800
6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr

/*
 * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psPromotionManager.cpp.incl"

PSPromotionManager** PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
OopTaskQueueSet*     PSPromotionManager::_stack_array_breadth = NULL;
PSOldGen*            PSPromotionManager::_old_gen = NULL;
MutableSpace*        PSPromotionManager::_young_space = NULL;
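
// Note on the two scavenge orders used throughout this file: with
// UseDepthFirstScavengeOrder the task queues hold StarTask entries
// (oop* / narrowOop* locations still to be processed) and are grouped in
// _stack_array_depth; with breadth-first scavenging the queues hold the
// promoted oops themselves and are grouped in _stack_array_breadth. Each
// promotion manager owns one claimed stack of the matching flavor
// (registered for work stealing for the GC worker threads) plus an
// overflow stack.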

void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  if (UseDepthFirstScavengeOrder) {
    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
  } else {
    _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_breadth != NULL, "Could not initialize promotion manager");
  }

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    if (UseDepthFirstScavengeOrder) {
      stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
    } else {
      stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth());
    }
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[ParallelGCThreads];
}

void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}

void PSPromotionManager::post_scavenge() {
#if PS_PM_STATS
  print_stats();
#endif // PS_PM_STATS

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);

    // The guarantees are a bit gratuitous but, if one fires, we'll
    // have a better idea of what went wrong.
    if (i < ParallelGCThreads) {
      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_depth()->length() <= 0),
                "promotion manager overflow stack must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_breadth()->length() <= 0),
                "promotion manager overflow stack must be empty");

      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_depth()->size() <= 0),
                "promotion manager claimed stack must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_breadth()->size() <= 0),
                "promotion manager claimed stack must be empty");
    } else {
      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_depth()->length() <= 0),
                "VM Thread promotion manager overflow stack "
                "must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->overflow_stack_breadth()->length() <= 0),
                "VM Thread promotion manager overflow stack "
                "must be empty");

      guarantee((!UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_depth()->size() <= 0),
                "VM Thread promotion manager claimed stack "
                "must be empty");
      guarantee((UseDepthFirstScavengeOrder ||
                 manager->claimed_stack_breadth()->size() <= 0),
                "VM Thread promotion manager claimed stack "
                "must be empty");
    }

    manager->flush_labs();
  }
}

#if PS_PM_STATS

void
PSPromotionManager::print_stats(uint i) {
  tty->print_cr("---- GC Worker %2d Stats", i);
  tty->print_cr("    total pushes            %8d", _total_pushes);
  tty->print_cr("    masked pushes           %8d", _masked_pushes);
  tty->print_cr("    overflow pushes         %8d", _overflow_pushes);
  tty->print_cr("    max overflow length     %8d", _max_overflow_length);
  tty->print_cr("");
  tty->print_cr("    arrays chunked          %8d", _arrays_chunked);
  tty->print_cr("    array chunks processed  %8d", _array_chunks_processed);
  tty->print_cr("");
  tty->print_cr("    total steals            %8d", _total_steals);
  tty->print_cr("    masked steals           %8d", _masked_steals);
  tty->print_cr("");
}

void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats (%s), GC %3d",
                (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
                Universe::heap()->total_collections());

  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    PSPromotionManager* manager = manager_array(i);
    manager->print_stats(i);
  }
}

#endif // PS_PM_STATS

PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  _depth_first = UseDepthFirstScavengeOrder;

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  if (depth_first()) {
    claimed_stack_depth()->initialize();
    queue_size = claimed_stack_depth()->max_elems();
    // We want the overflow stack to be permanent.
    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
    _overflow_stack_breadth = NULL;
  } else {
    claimed_stack_breadth()->initialize();
    queue_size = claimed_stack_breadth()->max_elems();
    // We want the overflow stack to be permanent.
    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
    _overflow_stack_depth = NULL;
  }
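
  // Draining policy: with a single GC thread, or when GCDrainStackTargetSize
  // is zero, each drain empties the claimed stack completely. Otherwise
  // draining stops once the claimed stack is down to _target_stack_size
  // entries, leaving some work available for other threads to steal.
  // For example, if GCDrainStackTargetSize is 64 (a typical default) and the
  // queue holds a few thousand entries, the target is
  // min(64, queue_size / 4) = 64.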
  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}

void PSPromotionManager::reset() {
  assert(claimed_stack_empty(), "reset of non-empty claimed stack");
  assert(overflow_stack_empty(), "reset of non-empty overflow stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; prefilling would only waste heap space. Each
  // LAB starts out as an empty region at the current top of its space and
  // is refilled lazily by the first allocation that needs it.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _prefetch_queue.clear();

#if PS_PM_STATS
  _total_pushes = 0;
  _masked_pushes = 0;
  _overflow_pushes = 0;
  _max_overflow_length = 0;
  _arrays_chunked = 0;
  _array_chunks_processed = 0;
  _total_steals = 0;
  _masked_steals = 0;
#endif // PS_PM_STATS
}

void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  assert(depth_first(), "invariant");
  assert(overflow_stack_depth() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (!overflow_stack_depth()->is_empty()) {
      // The linux compiler wants a different overloaded operator= in
      // TaskQueue for assigning to p than the other compilers accept,
      // so pop into a separate local instead.
      StarTask ptr = overflow_stack_depth()->pop();
      process_popped_location_depth(ptr);
    }

    if (totally_drain) {
      while (claimed_stack_depth()->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (claimed_stack_depth()->size() > _target_stack_size &&
             claimed_stack_depth()->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && claimed_stack_depth()->size() > 0) ||
           (overflow_stack_depth()->length() > 0));

  assert(!totally_drain || claimed_stack_empty(), "Sanity");
  assert(totally_drain ||
         claimed_stack_depth()->size() <= _target_stack_size,
         "Sanity");
  assert(overflow_stack_empty(), "Sanity");
}

void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
  assert(!depth_first(), "invariant");
  assert(overflow_stack_breadth() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    oop obj;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (!overflow_stack_breadth()->is_empty()) {
      obj = overflow_stack_breadth()->pop();
      obj->copy_contents(this);
    }

    if (totally_drain) {
      // obj is a reference!!!
      while (claimed_stack_breadth()->pop_local(obj)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
        obj->copy_contents(this);
      }
    } else {
      // obj is a reference!!!
      while (claimed_stack_breadth()->size() > _target_stack_size &&
             claimed_stack_breadth()->pop_local(obj)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
        obj->copy_contents(this);
      }
    }

    // If we could not find any other work, flush the prefetch queue.
    if (claimed_stack_breadth()->size() == 0 &&
        (overflow_stack_breadth()->length() == 0)) {
      flush_prefetch_queue();
    }
  } while ((totally_drain && claimed_stack_breadth()->size() > 0) ||
           (overflow_stack_breadth()->length() > 0));

  assert(!totally_drain || claimed_stack_empty(), "Sanity");
  assert(totally_drain ||
         claimed_stack_breadth()->size() <= _target_stack_size,
         "Sanity");
  assert(overflow_stack_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
  assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed.
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
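// Overview of the algorithm: read the mark word once; if the object has not
// yet been forwarded, pick a destination (the young LAB while the object's
// age is below the tenuring threshold, otherwise the old LAB), copy the
// contents, and then CAS a forwarding pointer into the original object's
// header. The winner of that race pushes the new copy's contents (or, for a
// large objArray in the depth-first case, a masked chunked-array task) onto
// its work stack; a loser unallocates its speculative copy, or overwrites it
// with a filler object if it cannot, and returns the winner's forwardee.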

oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = (oop) _young_lab.allocate(new_obj_size);
      if (new_obj == NULL && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != NULL) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = (oop) _young_lab.allocate(new_obj_size);
          } else {
            _young_gen_is_full = true;
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion-failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      if (depth_first) {
        // Do the size comparison first with new_obj_size, which we
        // already have. Hopefully, only a few objects are larger than
        // _min_array_size_for_chunking, and most of them will be arrays.
        // So, the is_objArray() test would be very infrequent.
        if (new_obj_size > _min_array_size_for_chunking &&
            new_obj->is_objArray() &&
            PSChunkLargeArrays) {
          // we'll chunk it
#if PS_PM_STATS
          ++_arrays_chunked;
#endif // PS_PM_STATS
          oop* const masked_o = mask_chunked_array_oop(o);
          push_depth(masked_o);
#if PS_PM_STATS
          ++_masked_pushes;
#endif // PS_PM_STATS
        } else {
          // we'll just push its contents
          new_obj->push_contents(this);
        }
      } else {
        push_breadth(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object.
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object(new_obj)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object(new_obj)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}

template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start < end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

#if PS_PM_STATS
  ++_array_chunks_processed;
#endif // PS_PM_STATS

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
#if PS_PM_STATS
    ++_masked_pushes;
#endif // PS_PM_STATS
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}
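
// Note on the chunking protocol above: the length field of the already
// forwarded ("old") copy is reused as a cursor. Each call processes the last
// _array_chunk_size elements of the remaining range [0, length), shrinks the
// recorded length accordingly, and re-pushes the masked array task until the
// final chunk restores the true length from the forwardee. For example
// (illustrative numbers: ParGCArrayScanChunk = 50, so that
// _min_array_size_for_chunking = 75), a 180-element array is processed as
// the element ranges [130, 180), [80, 130), [30, 80) and finally [0, 30).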

oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started.  If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
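  // Note: cas_forward_to(obj, obj_mark) forwards the object to itself, so
  // references to it continue to point at the original location. The call to
  // PSScavenge::oop_promotion_failed() below preserves the original mark
  // word so it can be restored once the failed scavenge is cleaned up.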
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    if (depth_first()) {
      obj->push_contents(this);
    } else {
      // Don't bother incrementing the age, just push
      // onto the claimed_stack.
      push_breadth(obj);
    }

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifdef DEBUG
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                           "promotion-failure",
                           obj->blueprint()->internal_name(),
                           obj, obj->size());
  }
#endif

  return obj;
}