src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp

author:      johnc
date:        Thu, 22 Sep 2011 10:57:37 -0700
changeset:   3175  4dfb2df418f2
parent:      2708  1d1603768966
child:       3181  c63b928b212b
permissions: -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"

PSPromotionManager** PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
PSOldGen*            PSPromotionManager::_old_gen = NULL;
MutableSpace*        PSPromotionManager::_young_space = NULL;

void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[ParallelGCThreads];
}

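// Called once per scavenge, before any copying starts: re-caches to_space
// and resets each per-thread promotion manager, including the extra one
// reserved for the VMThread.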
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}

void PSPromotionManager::post_scavenge() {
  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    manager->flush_labs();
  }
}

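// Per-manager task queue statistics, compiled in only when TASKQUEUE_STATS
// is enabled at build time, and printed at the end of each scavenge when
// -XX:+PrintGCDetails and -XX:+ParallelGCVerbose are set (see post_scavenge).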
#if TASKQUEUE_STATS
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  tty->print("%3u ", i);
  _claimed_stack_depth.stats.print();
  tty->cr();
}

void
PSPromotionManager::print_local_stats(uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats, GC %3d",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_taskqueue_stats(i);
  }

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) tty->print_cr(pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS

PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}

void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; that would waste heap space!
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  TASKQUEUE_STATS_ONLY(reset_stats());
}

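// Drains this thread's depth-first task queue. The overflow stack is always
// drained completely, and first, so that other threads can steal from the
// claimed (fixed-size) portion of the queue while this thread works. The
// claimed portion is then drained either completely (totally_drain) or only
// down to _target_stack_size entries, leaving the rest available for
// stealing by idle GC threads.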
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//

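// Copies the given object into to-space or, if it is too old or to-space is
// full, into the old generation. The mark word is read once up front; if the
// object is already forwarded, the existing forwardee is returned. Otherwise
// space is claimed from the appropriate PLAB (or directly from the space for
// large objects), the object is copied, and a forwarding pointer is
// installed with a CAS. If the CAS is lost, the speculative copy is
// unallocated (or overwritten with a filler object) and the winner's
// forwardee is returned instead.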
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = (oop) _young_lab.allocate(new_obj_size);
      if (new_obj == NULL && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != NULL) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = (oop) _young_lab.allocate(new_obj_size);
          } else {
            _young_gen_is_full = true;
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion-failed test and its handling.
        // The code belongs here for two reasons: it is slightly
        // different from the code below, and it cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
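        // Masking the oop tags this queue entry as an array-chunk task, so
        // the pop/steal side routes it to process_array_chunk() instead of
        // treating it as an ordinary oop* location (see
        // psPromotionManager.inline.hpp).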
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object(new_obj)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object(new_obj)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}

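// Scans the elements of obj in the half-open range [start, end) and claims
// or forwards each element that still points into the scavengable young
// generation. Templated on the element type to handle both compressed
// (narrowOop) and uncompressed (oop) heaps.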
template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}

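// Processes one chunk of a large object array. The length field of the old
// (already forwarded) copy serves as a cursor: if more than
// _min_array_size_for_chunking elements remain, the last _array_chunk_size
// elements are scanned, the stored length is reduced, and the masked oop is
// re-pushed; otherwise this is the final chunk, and the true length is
// restored from the new copy before the remaining elements are scanned.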
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}

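// Handles an object that could not be copied: the object is "self-forwarded"
// by CAS-ing a forwarding pointer to itself into its header, so it stays in
// place yet is recognizably forwarded to other threads. The winner of the
// CAS pushes the object's contents for later processing and lets PSScavenge
// preserve the original mark word so it can be restored after the failed
// scavenge.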
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    obj->push_contents(this);

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifdef DEBUG
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           "promotion-failure",
                           obj->blueprint()->internal_name(),
                           obj, obj->size());
  }
#endif

  return obj;
}
