src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp

author:      xdono
date:        Wed, 02 Jul 2008 12:55:16 -0700
changeset:   631:d1605aabd0a1
parent:      548:ba764ed4b6f2
child:       916:7d7a7c599c17
permissions: -rw-r--r--

6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell

duke@435 1 /*
xdono@631 2 * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_psPromotionManager.cpp.incl"
duke@435 27
duke@435 28 PSPromotionManager** PSPromotionManager::_manager_array = NULL;
duke@435 29 OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
duke@435 30 OopTaskQueueSet* PSPromotionManager::_stack_array_breadth = NULL;
duke@435 31 PSOldGen* PSPromotionManager::_old_gen = NULL;
duke@435 32 MutableSpace* PSPromotionManager::_young_space = NULL;
duke@435 33
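// _manager_array layout: slots [0, ParallelGCThreads) hold the
// per-worker promotion managers (registered for work stealing);
// slot ParallelGCThreads holds the VMThread's manager, which is
// not available for stealing.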
duke@435 34 void PSPromotionManager::initialize() {
duke@435 35 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 36 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 37
duke@435 38 _old_gen = heap->old_gen();
duke@435 39 _young_space = heap->young_gen()->to_space();
duke@435 40
duke@435 41 assert(_manager_array == NULL, "Attempt to initialize twice");
duke@435 42 _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1);
duke@435 43 guarantee(_manager_array != NULL, "Could not initialize promotion manager");
duke@435 44
duke@435 45 if (UseDepthFirstScavengeOrder) {
duke@435 46 _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
duke@435 47 guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
duke@435 48 } else {
duke@435 49 _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads);
duke@435 50 guarantee(_stack_array_breadth != NULL, "Could not initialize promotion manager");
duke@435 51 }
duke@435 52
duke@435 53 // Create and register the PSPromotionManager(s) for the worker threads.
duke@435 54 for(uint i=0; i<ParallelGCThreads; i++) {
duke@435 55 _manager_array[i] = new PSPromotionManager();
duke@435 56 guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
duke@435 57 if (UseDepthFirstScavengeOrder) {
duke@435 58 stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
duke@435 59 } else {
duke@435 60 stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth());
duke@435 61 }
duke@435 62 }
duke@435 63
duke@435 64 // The VMThread gets its own PSPromotionManager, which is not available
duke@435 65 // for work stealing.
duke@435 66 _manager_array[ParallelGCThreads] = new PSPromotionManager();
duke@435 67 guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
duke@435 68 }
duke@435 69
duke@435 70 PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
duke@435 71 assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
duke@435 72 assert(_manager_array != NULL, "Sanity");
duke@435 73 return _manager_array[index];
duke@435 74 }
duke@435 75
duke@435 76 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
duke@435 77 assert(_manager_array != NULL, "Sanity");
duke@435 78 return _manager_array[ParallelGCThreads];
duke@435 79 }
duke@435 80
duke@435 81 void PSPromotionManager::pre_scavenge() {
duke@435 82 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 83 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 84
duke@435 85 _young_space = heap->young_gen()->to_space();
duke@435 86
duke@435 87 for(uint i=0; i<ParallelGCThreads+1; i++) {
duke@435 88 manager_array(i)->reset();
duke@435 89 }
duke@435 90 }
duke@435 91
duke@435 92 void PSPromotionManager::post_scavenge() {
duke@435 93 #if PS_PM_STATS
duke@435 94 print_stats();
duke@435 95 #endif // PS_PM_STATS
duke@435 96
duke@435 97 for(uint i=0; i<ParallelGCThreads+1; i++) {
duke@435 98 PSPromotionManager* manager = manager_array(i);
duke@435 99
duke@435 100 // the guarantees are a bit gratuitous but, if one fires, we'll
duke@435 101 // have a better idea of what went wrong
duke@435 102 if (i < ParallelGCThreads) {
duke@435 103 guarantee((!UseDepthFirstScavengeOrder ||
duke@435 104 manager->overflow_stack_depth()->length() <= 0),
duke@435 105 "promotion manager overflow stack must be empty");
duke@435 106 guarantee((UseDepthFirstScavengeOrder ||
duke@435 107 manager->overflow_stack_breadth()->length() <= 0),
duke@435 108 "promotion manager overflow stack must be empty");
duke@435 109
duke@435 110 guarantee((!UseDepthFirstScavengeOrder ||
duke@435 111 manager->claimed_stack_depth()->size() <= 0),
duke@435 112 "promotion manager claimed stack must be empty");
duke@435 113 guarantee((UseDepthFirstScavengeOrder ||
duke@435 114 manager->claimed_stack_breadth()->size() <= 0),
duke@435 115 "promotion manager claimed stack must be empty");
duke@435 116 } else {
duke@435 117 guarantee((!UseDepthFirstScavengeOrder ||
duke@435 118 manager->overflow_stack_depth()->length() <= 0),
duke@435 119 "VM Thread promotion manager overflow stack "
duke@435 120 "must be empty");
duke@435 121 guarantee((UseDepthFirstScavengeOrder ||
duke@435 122 manager->overflow_stack_breadth()->length() <= 0),
duke@435 123 "VM Thread promotion manager overflow stack "
duke@435 124 "must be empty");
duke@435 125
duke@435 126 guarantee((!UseDepthFirstScavengeOrder ||
duke@435 127 manager->claimed_stack_depth()->size() <= 0),
duke@435 128 "VM Thread promotion manager claimed stack "
duke@435 129 "must be empty");
duke@435 130 guarantee((UseDepthFirstScavengeOrder ||
duke@435 131 manager->claimed_stack_breadth()->size() <= 0),
duke@435 132 "VM Thread promotion manager claimed stack "
duke@435 133 "must be empty");
duke@435 134 }
duke@435 135
duke@435 136 manager->flush_labs();
duke@435 137 }
duke@435 138 }
duke@435 139
duke@435 140 #if PS_PM_STATS
duke@435 141
duke@435 142 void
duke@435 143 PSPromotionManager::print_stats(uint i) {
duke@435 144 tty->print_cr("---- GC Worker %2d Stats", i);
duke@435 145 tty->print_cr(" total pushes %8d", _total_pushes);
duke@435 146 tty->print_cr(" masked pushes %8d", _masked_pushes);
duke@435 147 tty->print_cr(" overflow pushes %8d", _overflow_pushes);
duke@435 148 tty->print_cr(" max overflow length %8d", _max_overflow_length);
duke@435 149 tty->print_cr("");
duke@435 150 tty->print_cr(" arrays chunked %8d", _arrays_chunked);
duke@435 151 tty->print_cr(" array chunks processed %8d", _array_chunks_processed);
duke@435 152 tty->print_cr("");
duke@435 153 tty->print_cr(" total steals %8d", _total_steals);
duke@435 154 tty->print_cr(" masked steals %8d", _masked_steals);
duke@435 155 tty->print_cr("");
duke@435 156 }
duke@435 157
duke@435 158 void
duke@435 159 PSPromotionManager::print_stats() {
duke@435 160 tty->print_cr("== GC Tasks Stats (%s), GC %3d",
duke@435 161 (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
duke@435 162 Universe::heap()->total_collections());
duke@435 163
duke@435 164 for (uint i = 0; i < ParallelGCThreads+1; ++i) {
duke@435 165 PSPromotionManager* manager = manager_array(i);
duke@435 166 manager->print_stats(i);
duke@435 167 }
duke@435 168 }
duke@435 169
duke@435 170 #endif // PS_PM_STATS
duke@435 171
duke@435 172 PSPromotionManager::PSPromotionManager() {
duke@435 173 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 174 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 175 _depth_first = UseDepthFirstScavengeOrder;
duke@435 176
duke@435 177 // We set the old lab's start array.
duke@435 178 _old_lab.set_start_array(old_gen()->start_array());
duke@435 179
duke@435 180 uint queue_size;
duke@435 181 if (depth_first()) {
duke@435 182 claimed_stack_depth()->initialize();
duke@435 183 queue_size = claimed_stack_depth()->max_elems();
duke@435 184 // We want the overflow stack to be permanent
coleenp@548 185 _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
duke@435 186 _overflow_stack_breadth = NULL;
duke@435 187 } else {
duke@435 188 claimed_stack_breadth()->initialize();
duke@435 189 queue_size = claimed_stack_breadth()->max_elems();
duke@435 190 // We want the overflow stack to be permanent
duke@435 191 _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
duke@435 192 _overflow_stack_depth = NULL;
duke@435 193 }
duke@435 194
duke@435 195 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
duke@435 196 if (_totally_drain) {
duke@435 197 _target_stack_size = 0;
duke@435 198 } else {
duke@435 199 // don't let the target stack size be more than 1/4 of the entries
duke@435 200 _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
duke@435 201 (uint) (queue_size / 4));
duke@435 202 }
duke@435 203
duke@435 204 _array_chunk_size = ParGCArrayScanChunk;
duke@435 205 // let's choose 1.5x the chunk size
duke@435 206 _min_array_size_for_chunking = 3 * _array_chunk_size / 2;
duke@435 207
duke@435 208 reset();
duke@435 209 }
duke@435 210
duke@435 211 void PSPromotionManager::reset() {
duke@435 212 assert(claimed_stack_empty(), "reset of non-empty claimed stack");
duke@435 213 assert(overflow_stack_empty(), "reset of non-empty overflow stack");
duke@435 214
duke@435 215 // We need to get an assert in here to make sure the labs are always flushed.
duke@435 216
duke@435 217 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 218 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 219
duke@435 220 // Do not prefill the LABs, to avoid wasting heap space!
duke@435 221 HeapWord* lab_base = young_space()->top();
duke@435 222 _young_lab.initialize(MemRegion(lab_base, (size_t)0));
duke@435 223 _young_gen_is_full = false;
duke@435 224
duke@435 225 lab_base = old_gen()->object_space()->top();
duke@435 226 _old_lab.initialize(MemRegion(lab_base, (size_t)0));
duke@435 227 _old_gen_is_full = false;
duke@435 228
duke@435 229 _prefetch_queue.clear();
duke@435 230
duke@435 231 #if PS_PM_STATS
duke@435 232 _total_pushes = 0;
duke@435 233 _masked_pushes = 0;
duke@435 234 _overflow_pushes = 0;
duke@435 235 _max_overflow_length = 0;
duke@435 236 _arrays_chunked = 0;
duke@435 237 _array_chunks_processed = 0;
duke@435 238 _total_steals = 0;
duke@435 239 _masked_steals = 0;
duke@435 240 #endif // PS_PM_STATS
duke@435 241 }
duke@435 242
coleenp@548 243
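// Draining policy for both the depth- and breadth-first variants
// below: empty the overflow stack first, so that other threads can
// steal from this thread's claimed stack while it works. Unless a
// total drain is requested, stop once the claimed stack has shrunk
// to _target_stack_size, leaving the rest available for stealing.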
duke@435 244 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
duke@435 245 assert(depth_first(), "invariant");
duke@435 246 assert(overflow_stack_depth() != NULL, "invariant");
duke@435 247 totally_drain = totally_drain || _totally_drain;
duke@435 248
duke@435 249 #ifdef ASSERT
duke@435 250 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 251 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 252 MutableSpace* to_space = heap->young_gen()->to_space();
duke@435 253 MutableSpace* old_space = heap->old_gen()->object_space();
duke@435 254 MutableSpace* perm_space = heap->perm_gen()->object_space();
duke@435 255 #endif /* ASSERT */
duke@435 256
duke@435 257 do {
coleenp@548 258 StarTask p;
duke@435 259
duke@435 260 // Drain overflow stack first, so other threads can steal from
duke@435 261 // claimed stack while we work.
duke@435 262 while(!overflow_stack_depth()->is_empty()) {
coleenp@548 263 // The Linux compiler wants a different overloaded operator= in TaskQueue
coleenp@548 264 // for assigning to p than the other compilers do, so pop into a temporary.
coleenp@548 265 StarTask ptr = overflow_stack_depth()->pop();
coleenp@548 266 process_popped_location_depth(ptr);
duke@435 267 }
duke@435 268
duke@435 269 if (totally_drain) {
duke@435 270 while (claimed_stack_depth()->pop_local(p)) {
duke@435 271 process_popped_location_depth(p);
duke@435 272 }
duke@435 273 } else {
duke@435 274 while (claimed_stack_depth()->size() > _target_stack_size &&
duke@435 275 claimed_stack_depth()->pop_local(p)) {
duke@435 276 process_popped_location_depth(p);
duke@435 277 }
duke@435 278 }
duke@435 279 } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
duke@435 280 (overflow_stack_depth()->length() > 0) );
duke@435 281
duke@435 282 assert(!totally_drain || claimed_stack_empty(), "Sanity");
duke@435 283 assert(totally_drain ||
duke@435 284 claimed_stack_depth()->size() <= _target_stack_size,
duke@435 285 "Sanity");
duke@435 286 assert(overflow_stack_empty(), "Sanity");
duke@435 287 }
duke@435 288
duke@435 289 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
duke@435 290 assert(!depth_first(), "invariant");
duke@435 291 assert(overflow_stack_breadth() != NULL, "invariant");
duke@435 292 totally_drain = totally_drain || _totally_drain;
duke@435 293
duke@435 294 #ifdef ASSERT
duke@435 295 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 296 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 297 MutableSpace* to_space = heap->young_gen()->to_space();
duke@435 298 MutableSpace* old_space = heap->old_gen()->object_space();
duke@435 299 MutableSpace* perm_space = heap->perm_gen()->object_space();
duke@435 300 #endif /* ASSERT */
duke@435 301
duke@435 302 do {
duke@435 303 oop obj;
duke@435 304
duke@435 305 // Drain overflow stack first, so other threads can steal from
duke@435 306 // claimed stack while we work.
duke@435 307 while(!overflow_stack_breadth()->is_empty()) {
duke@435 308 obj = overflow_stack_breadth()->pop();
duke@435 309 obj->copy_contents(this);
duke@435 310 }
duke@435 311
duke@435 312 if (totally_drain) {
duke@435 313 // pop_local() assigns to obj by reference.
duke@435 314 while (claimed_stack_breadth()->pop_local(obj)) {
duke@435 315 // It would be nice to assert about the type of objects we might
duke@435 316 // pop, but they can come from anywhere, unfortunately.
duke@435 317 obj->copy_contents(this);
duke@435 318 }
duke@435 319 } else {
duke@435 320 // pop_local() assigns to obj by reference.
duke@435 321 while (claimed_stack_breadth()->size() > _target_stack_size &&
duke@435 322 claimed_stack_breadth()->pop_local(obj)) {
duke@435 323 // It would be nice to assert about the type of objects we might
duke@435 324 // pop, but they can come from anywhere, unfortunately.
duke@435 325 obj->copy_contents(this);
duke@435 326 }
duke@435 327 }
duke@435 328
duke@435 329 // If we could not find any other work, flush the prefetch queue
duke@435 330 if (claimed_stack_breadth()->size() == 0 &&
duke@435 331 (overflow_stack_breadth()->length() == 0)) {
duke@435 332 flush_prefetch_queue();
duke@435 333 }
duke@435 334 } while((totally_drain && claimed_stack_breadth()->size() > 0) ||
duke@435 335 (overflow_stack_breadth()->length() > 0));
duke@435 336
duke@435 337 assert(!totally_drain || claimed_stack_empty(), "Sanity");
duke@435 338 assert(totally_drain ||
duke@435 339 claimed_stack_breadth()->size() <= _target_stack_size,
duke@435 340 "Sanity");
duke@435 341 assert(overflow_stack_empty(), "Sanity");
duke@435 342 }
duke@435 343
duke@435 344 void PSPromotionManager::flush_labs() {
duke@435 345 assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
duke@435 346 assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");
duke@435 347
duke@435 348 // If either promotion lab fills up, we can flush the
duke@435 349 // lab but not refill it, so check first.
duke@435 350 assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
duke@435 351 if (!_young_lab.is_flushed())
duke@435 352 _young_lab.flush();
duke@435 353
duke@435 354 assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
duke@435 355 if (!_old_lab.is_flushed())
duke@435 356 _old_lab.flush();
duke@435 357
duke@435 358 // Let PSScavenge know if we overflowed
duke@435 359 if (_young_gen_is_full) {
duke@435 360 PSScavenge::set_survivor_overflow(true);
duke@435 361 }
duke@435 362 }
duke@435 363
duke@435 364 //
duke@435 365 // This method is pretty bulky. It would be nice to split it up
duke@435 366 // into smaller submethods, but we need to be careful not to hurt
duke@435 367 // performance.
duke@435 368 //
duke@435 369
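// Outline of the promotion protocol:
//  1. Read the mark word once (test_mark); a racing GC thread may
//     install a forwarding pointer at any time.
//  2. If the object is not yet forwarded, allocate a copy in the
//     young PLAB (below the tenuring threshold) or the old PLAB,
//     falling back to a direct CAS allocation for large objects.
//  3. Copy the object, then try to install the forwarding pointer
//     with a CAS on the mark word.
//  4. If the CAS fails, another thread won the race: unallocate (or
//     fill) our copy and return the winner's forwardee instead.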
duke@435 370 oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
coleenp@548 371 assert(PSScavenge::should_scavenge(&o), "Sanity");
duke@435 372
duke@435 373 oop new_obj = NULL;
duke@435 374
duke@435 375 // NOTE! We must be very careful with any methods that access the mark
duke@435 376 // in o. There may be multiple threads racing on it, and it may be forwarded
duke@435 377 // at any time. Do not use oop methods for accessing the mark!
duke@435 378 markOop test_mark = o->mark();
duke@435 379
duke@435 380 // The same test as "o->is_forwarded()"
duke@435 381 if (!test_mark->is_marked()) {
duke@435 382 bool new_obj_is_tenured = false;
duke@435 383 size_t new_obj_size = o->size();
duke@435 384
duke@435 385 // Find the object's age, MT safe.
duke@435 386 int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
duke@435 387 test_mark->displaced_mark_helper()->age() : test_mark->age();
duke@435 388
duke@435 389 // Try allocating obj in to-space (unless too old)
duke@435 390 if (age < PSScavenge::tenuring_threshold()) {
duke@435 391 new_obj = (oop) _young_lab.allocate(new_obj_size);
duke@435 392 if (new_obj == NULL && !_young_gen_is_full) {
duke@435 393 // Do we allocate directly, or flush and refill?
duke@435 394 if (new_obj_size > (YoungPLABSize / 2)) {
duke@435 395 // Allocate this object directly
duke@435 396 new_obj = (oop)young_space()->cas_allocate(new_obj_size);
duke@435 397 } else {
duke@435 398 // Flush and fill
duke@435 399 _young_lab.flush();
duke@435 400
duke@435 401 HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
duke@435 402 if (lab_base != NULL) {
duke@435 403 _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
duke@435 404 // Try the young lab allocation again.
duke@435 405 new_obj = (oop) _young_lab.allocate(new_obj_size);
duke@435 406 } else {
duke@435 407 _young_gen_is_full = true;
duke@435 408 }
duke@435 409 }
duke@435 410 }
duke@435 411 }
duke@435 412
duke@435 413 // Otherwise try allocating obj tenured
duke@435 414 if (new_obj == NULL) {
duke@435 415 #ifndef PRODUCT
duke@435 416 if (Universe::heap()->promotion_should_fail()) {
duke@435 417 return oop_promotion_failed(o, test_mark);
duke@435 418 }
duke@435 419 #endif // #ifndef PRODUCT
duke@435 420
duke@435 421 new_obj = (oop) _old_lab.allocate(new_obj_size);
duke@435 422 new_obj_is_tenured = true;
duke@435 423
duke@435 424 if (new_obj == NULL) {
duke@435 425 if (!_old_gen_is_full) {
duke@435 426 // Do we allocate directly, or flush and refill?
duke@435 427 if (new_obj_size > (OldPLABSize / 2)) {
duke@435 428 // Allocate this object directly
duke@435 429 new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
duke@435 430 } else {
duke@435 431 // Flush and fill
duke@435 432 _old_lab.flush();
duke@435 433
duke@435 434 HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
duke@435 435 if (lab_base != NULL) {
duke@435 436 _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
duke@435 437 // Try the old lab allocation again.
duke@435 438 new_obj = (oop) _old_lab.allocate(new_obj_size);
duke@435 439 }
duke@435 440 }
duke@435 441 }
duke@435 442
duke@435 443 // This is the promotion-failure test and its handling code.
duke@435 444 // The code belongs here for two reasons: it is slightly
duke@435 445 // different than the code below and cannot share the
duke@435 446 // CAS testing code, and keeping it here minimizes
duke@435 447 // the impact on the common-case fast path code.
duke@435 448
duke@435 449 if (new_obj == NULL) {
duke@435 450 _old_gen_is_full = true;
duke@435 451 return oop_promotion_failed(o, test_mark);
duke@435 452 }
duke@435 453 }
duke@435 454 }
duke@435 455
duke@435 456 assert(new_obj != NULL, "allocation should have succeeded");
duke@435 457
duke@435 458 // Copy obj
duke@435 459 Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
duke@435 460
duke@435 461 // Now we have to CAS in the header.
duke@435 462 if (o->cas_forward_to(new_obj, test_mark)) {
duke@435 463 // We won any races, we "own" this object.
duke@435 464 assert(new_obj == o->forwardee(), "Sanity");
duke@435 465
duke@435 466 // Increment age if obj still in new generation. Now that
duke@435 467 // we're dealing with a markOop that cannot change, it is
duke@435 468 // okay to use the non mt safe oop methods.
duke@435 469 if (!new_obj_is_tenured) {
duke@435 470 new_obj->incr_age();
duke@435 471 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
duke@435 472 }
duke@435 473
duke@435 474 if (depth_first) {
duke@435 475 // Do the size comparison first with new_obj_size, which we
duke@435 476 // already have. Hopefully, only a few objects are larger than
duke@435 477 // _min_array_size_for_chunking, and most of them will be arrays.
duke@435 478 // So, the is_objArray() test would be very infrequent.
duke@435 479 if (new_obj_size > _min_array_size_for_chunking &&
duke@435 480 new_obj->is_objArray() &&
duke@435 481 PSChunkLargeArrays) {
duke@435 482 // we'll chunk it
duke@435 483 #if PS_PM_STATS
duke@435 484 ++_arrays_chunked;
duke@435 485 #endif // PS_PM_STATS
duke@435 486 oop* const masked_o = mask_chunked_array_oop(o);
duke@435 487 push_depth(masked_o);
duke@435 488 #if PS_PM_STATS
duke@435 489 ++_masked_pushes;
duke@435 490 #endif // PS_PM_STATS
duke@435 491 } else {
duke@435 492 // we'll just push its contents
duke@435 493 new_obj->push_contents(this);
duke@435 494 }
duke@435 495 } else {
duke@435 496 push_breadth(new_obj);
duke@435 497 }
duke@435 498 } else {
duke@435 499 // We lost, someone else "owns" this object
duke@435 500 guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
duke@435 501
duke@435 502 // Unallocate the space used. NOTE! We may have directly allocated
duke@435 503 // the object. If so, we cannot deallocate it, so we have to test!
duke@435 504 if (new_obj_is_tenured) {
duke@435 505 if (!_old_lab.unallocate_object(new_obj)) {
duke@435 506 // The promotion lab failed to unallocate the object.
duke@435 507 // We need to overwrite the object with a filler that
duke@435 508 // contains no interior pointers.
duke@435 509 MemRegion mr((HeapWord*)new_obj, new_obj_size);
duke@435 510 // Clean this up and move to oopFactory (see bug 4718422)
duke@435 511 SharedHeap::fill_region_with_object(mr);
duke@435 512 }
duke@435 513 } else {
duke@435 514 if (!_young_lab.unallocate_object(new_obj)) {
duke@435 515 // The promotion lab failed to unallocate the object.
duke@435 516 // We need to overwrite the object with a filler that
duke@435 517 // contains no interior pointers.
duke@435 518 MemRegion mr((HeapWord*)new_obj, new_obj_size);
duke@435 519 // Clean this up and move to oopFactory (see bug 4718422)
duke@435 520 SharedHeap::fill_region_with_object(mr);
duke@435 521 }
duke@435 522 }
duke@435 523
duke@435 524 // don't update this before the unallocation!
duke@435 525 new_obj = o->forwardee();
duke@435 526 }
duke@435 527 } else {
duke@435 528 assert(o->is_forwarded(), "Sanity");
duke@435 529 new_obj = o->forwardee();
duke@435 530 }
duke@435 531
duke@435 532 #ifdef DEBUG
duke@435 533 // This code must come after the CAS test, or it will print incorrect
duke@435 534 // information.
duke@435 535 if (TraceScavenge) {
coleenp@548 536 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
coleenp@548 537 PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
duke@435 538 new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
duke@435 539 }
duke@435 540 #endif
duke@435 541
duke@435 542 return new_obj;
duke@435 543 }
duke@435 544
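// Helper for process_array_chunk(): scans the slots in [start, end)
// of the new copy of the array and claims or forwards each location
// that still needs scavenging. Templatized on the slot type so the
// same code handles both narrowOop (compressed) and full-width oop
// slots.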
coleenp@548 545 template <class T> void PSPromotionManager::process_array_chunk_work(
coleenp@548 546 oop obj,
coleenp@548 547 int start, int end) {
coleenp@548 548 assert(start < end, "invariant");
coleenp@548 549 T* const base = (T*)objArrayOop(obj)->base();
coleenp@548 550 T* p = base + start;
coleenp@548 551 T* const chunk_end = base + end;
coleenp@548 552 while (p < chunk_end) {
coleenp@548 553 if (PSScavenge::should_scavenge(p)) {
coleenp@548 554 claim_or_forward_depth(p);
coleenp@548 555 }
coleenp@548 556 ++p;
coleenp@548 557 }
coleenp@548 558 }
coleenp@548 559
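// Large objArrays are scanned _array_chunk_size elements at a time.
// The amount still to be scanned is tracked by temporarily shortening
// the length field of the old (already forwarded) copy; a masked oop
// on the depth-first stack marks a partially processed array. The
// final chunk restores the true length, taken from the new copy.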
duke@435 560 void PSPromotionManager::process_array_chunk(oop old) {
duke@435 561 assert(PSChunkLargeArrays, "invariant");
duke@435 562 assert(old->is_objArray(), "invariant");
duke@435 563 assert(old->is_forwarded(), "invariant");
duke@435 564
duke@435 565 #if PS_PM_STATS
duke@435 566 ++_array_chunks_processed;
duke@435 567 #endif // PS_PM_STATS
duke@435 568
duke@435 569 oop const obj = old->forwardee();
duke@435 570
duke@435 571 int start;
duke@435 572 int const end = arrayOop(old)->length();
duke@435 573 if (end > (int) _min_array_size_for_chunking) {
duke@435 574 // we'll chunk more
duke@435 575 start = end - _array_chunk_size;
duke@435 576 assert(start > 0, "invariant");
duke@435 577 arrayOop(old)->set_length(start);
duke@435 578 push_depth(mask_chunked_array_oop(old));
duke@435 579 #if PS_PM_STATS
duke@435 580 ++_masked_pushes;
duke@435 581 #endif // PS_PM_STATS
duke@435 582 } else {
duke@435 583 // this is the final chunk for this array
duke@435 584 start = 0;
duke@435 585 int const actual_length = arrayOop(obj)->length();
duke@435 586 arrayOop(old)->set_length(actual_length);
duke@435 587 }
duke@435 588
coleenp@548 589 if (UseCompressedOops) {
coleenp@548 590 process_array_chunk_work<narrowOop>(obj, start, end);
coleenp@548 591 } else {
coleenp@548 592 process_array_chunk_work<oop>(obj, start, end);
duke@435 593 }
duke@435 594 }
duke@435 595
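// Promotion failure: the object is "self-forwarded" by CAS-ing a
// forwarding pointer to itself into its mark word. The thread that
// wins the CAS owns the object and scans its contents in place;
// a loser simply returns the forwardee installed by the winner.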
duke@435 596 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
duke@435 597 assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
duke@435 598
duke@435 599 // Attempt to CAS in the header.
duke@435 600 // This tests if the header is still the same as when
duke@435 601 // this started. If it is the same (i.e., no forwarding
duke@435 602 // pointer has been installed), then this thread owns
duke@435 603 // it.
duke@435 604 if (obj->cas_forward_to(obj, obj_mark)) {
duke@435 605 // We won any races, we "own" this object.
duke@435 606 assert(obj == obj->forwardee(), "Sanity");
duke@435 607
duke@435 608 if (depth_first()) {
duke@435 609 obj->push_contents(this);
duke@435 610 } else {
duke@435 611 // Don't bother incrementing the age, just push
duke@435 612 // onto the claimed stack.
duke@435 613 push_breadth(obj);
duke@435 614 }
duke@435 615
duke@435 616 // Save the mark if needed
duke@435 617 PSScavenge::oop_promotion_failed(obj, obj_mark);
duke@435 618 } else {
duke@435 619 // We lost, someone else "owns" this object
duke@435 620 guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");
duke@435 621
duke@435 622 // No unallocation to worry about.
duke@435 623 obj = obj->forwardee();
duke@435 624 }
duke@435 625
duke@435 626 #ifdef DEBUG
duke@435 627 if (TraceScavenge) {
duke@435 628 gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (" SIZE_FORMAT ")}",
duke@435 629 "promotion-failure",
duke@435 630 obj->blueprint()->internal_name(),
duke@435 631 obj, obj->size());
duke@435 632
duke@435 633 }
duke@435 634 #endif
duke@435 635
duke@435 636 return obj;
duke@435 637 }
