src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp

changeset 435
a61af66fc99e
child 548
ba764ed4b6f2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,625 @@
     1.4 +/*
     1.5 + * Copyright 2002-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "incls/_precompiled.incl"
    1.29 +#include "incls/_psPromotionManager.cpp.incl"
    1.30 +
// Static state shared by all promotion managers: the manager array
// (sized ParallelGCThreads + 1 in initialize(); the extra slot holds the
// VM thread's manager), the work-stealing task-queue set for whichever
// scavenge order is in use, and cached pointers to the old gen and the
// young gen's to-space.
PSPromotionManager**         PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*         PSPromotionManager::_stack_array_depth = NULL;
OopTaskQueueSet*             PSPromotionManager::_stack_array_breadth = NULL;
PSOldGen*                    PSPromotionManager::_old_gen = NULL;
MutableSpace*                PSPromotionManager::_young_space = NULL;
    1.36 +
    1.37 +void PSPromotionManager::initialize() {
    1.38 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    1.39 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    1.40 +
    1.41 +  _old_gen = heap->old_gen();
    1.42 +  _young_space = heap->young_gen()->to_space();
    1.43 +
    1.44 +  assert(_manager_array == NULL, "Attempt to initialize twice");
    1.45 +  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 );
    1.46 +  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
    1.47 +
    1.48 +  if (UseDepthFirstScavengeOrder) {
    1.49 +    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
    1.50 +    guarantee(_stack_array_depth != NULL, "Count not initialize promotion manager");
    1.51 +  } else {
    1.52 +    _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads);
    1.53 +    guarantee(_stack_array_breadth != NULL, "Count not initialize promotion manager");
    1.54 +  }
    1.55 +
    1.56 +  // Create and register the PSPromotionManager(s) for the worker threads.
    1.57 +  for(uint i=0; i<ParallelGCThreads; i++) {
    1.58 +    _manager_array[i] = new PSPromotionManager();
    1.59 +    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    1.60 +    if (UseDepthFirstScavengeOrder) {
    1.61 +      stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
    1.62 +    } else {
    1.63 +      stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth());
    1.64 +    }
    1.65 +  }
    1.66 +
    1.67 +  // The VMThread gets its own PSPromotionManager, which is not available
    1.68 +  // for work stealing.
    1.69 +  _manager_array[ParallelGCThreads] = new PSPromotionManager();
    1.70 +  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
    1.71 +}
    1.72 +
    1.73 +PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
    1.74 +  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
    1.75 +  assert(_manager_array != NULL, "Sanity");
    1.76 +  return _manager_array[index];
    1.77 +}
    1.78 +
// Return the VM thread's dedicated promotion manager. It lives in the
// extra slot past the worker managers and (per initialize()) is never
// registered with the stealing queue set.
PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[ParallelGCThreads];
}
    1.83 +
    1.84 +void PSPromotionManager::pre_scavenge() {
    1.85 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    1.86 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    1.87 +
    1.88 +  _young_space = heap->young_gen()->to_space();
    1.89 +
    1.90 +  for(uint i=0; i<ParallelGCThreads+1; i++) {
    1.91 +    manager_array(i)->reset();
    1.92 +  }
    1.93 +}
    1.94 +
    1.95 +void PSPromotionManager::post_scavenge() {
    1.96 +#if PS_PM_STATS
    1.97 +  print_stats();
    1.98 +#endif // PS_PM_STATS
    1.99 +
   1.100 +  for(uint i=0; i<ParallelGCThreads+1; i++) {
   1.101 +    PSPromotionManager* manager = manager_array(i);
   1.102 +
   1.103 +    // the guarantees are a bit gratuitous but, if one fires, we'll
   1.104 +    // have a better idea of what went wrong
   1.105 +    if (i < ParallelGCThreads) {
   1.106 +      guarantee((!UseDepthFirstScavengeOrder ||
   1.107 +                 manager->overflow_stack_depth()->length() <= 0),
   1.108 +                "promotion manager overflow stack must be empty");
   1.109 +      guarantee((UseDepthFirstScavengeOrder ||
   1.110 +                 manager->overflow_stack_breadth()->length() <= 0),
   1.111 +                "promotion manager overflow stack must be empty");
   1.112 +
   1.113 +      guarantee((!UseDepthFirstScavengeOrder ||
   1.114 +                 manager->claimed_stack_depth()->size() <= 0),
   1.115 +                "promotion manager claimed stack must be empty");
   1.116 +      guarantee((UseDepthFirstScavengeOrder ||
   1.117 +                 manager->claimed_stack_breadth()->size() <= 0),
   1.118 +                "promotion manager claimed stack must be empty");
   1.119 +    } else {
   1.120 +      guarantee((!UseDepthFirstScavengeOrder ||
   1.121 +                 manager->overflow_stack_depth()->length() <= 0),
   1.122 +                "VM Thread promotion manager overflow stack "
   1.123 +                "must be empty");
   1.124 +      guarantee((UseDepthFirstScavengeOrder ||
   1.125 +                 manager->overflow_stack_breadth()->length() <= 0),
   1.126 +                "VM Thread promotion manager overflow stack "
   1.127 +                "must be empty");
   1.128 +
   1.129 +      guarantee((!UseDepthFirstScavengeOrder ||
   1.130 +                 manager->claimed_stack_depth()->size() <= 0),
   1.131 +                "VM Thread promotion manager claimed stack "
   1.132 +                "must be empty");
   1.133 +      guarantee((UseDepthFirstScavengeOrder ||
   1.134 +                 manager->claimed_stack_breadth()->size() <= 0),
   1.135 +                "VM Thread promotion manager claimed stack "
   1.136 +                "must be empty");
   1.137 +    }
   1.138 +
   1.139 +    manager->flush_labs();
   1.140 +  }
   1.141 +}
   1.142 +
#if PS_PM_STATS

// Dump this manager's per-scavenge counters (pushes, overflow activity,
// array chunking, steals) for worker 'i'. Counters are zeroed in reset().
void
PSPromotionManager::print_stats(uint i) {
  tty->print_cr("---- GC Worker %2d Stats", i);
  tty->print_cr("    total pushes            %8d", _total_pushes);
  tty->print_cr("    masked pushes           %8d", _masked_pushes);
  tty->print_cr("    overflow pushes         %8d", _overflow_pushes);
  tty->print_cr("    max overflow length     %8d", _max_overflow_length);
  tty->print_cr("");
  tty->print_cr("    arrays chunked          %8d", _arrays_chunked);
  tty->print_cr("    array chunks processed  %8d", _array_chunks_processed);
  tty->print_cr("");
  tty->print_cr("    total steals            %8d", _total_steals);
  tty->print_cr("    masked steals           %8d", _masked_steals);
  tty->print_cr("");
}

// Dump stats for every manager (workers plus the VM thread's), headed by
// the scavenge order and the collection count.
void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats (%s), GC %3d",
                (UseDepthFirstScavengeOrder) ? "Depth-First" : "Breadth-First",
                Universe::heap()->total_collections());

  for (uint i = 0; i < ParallelGCThreads+1; ++i) {
    PSPromotionManager* manager = manager_array(i);
    manager->print_stats(i);
  }
}

#endif // PS_PM_STATS
   1.174 +
// Construct a per-thread promotion manager: snapshot the scavenge-order
// flag, wire the old gen's start array into the old LAB, set up the
// claimed stack and a permanent (C-heap) overflow stack for the active
// order, compute the partial-drain target, and initialize the
// array-chunking parameters. Ends with reset() to establish the
// per-scavenge state.
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  // Snapshot the global flag so per-object work uses a stable setting.
  _depth_first = UseDepthFirstScavengeOrder;

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  // Only the active order's stacks exist; the other overflow stack is
  // left NULL (post_scavenge and the drain methods rely on this).
  if (depth_first()) {
    claimed_stack_depth()->initialize();
    queue_size = claimed_stack_depth()->max_elems();
    // We want the overflow stack to be permanent
    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<oop*>(10, true);
    _overflow_stack_breadth = NULL;
  } else {
    claimed_stack_breadth()->initialize();
    queue_size = claimed_stack_breadth()->max_elems();
    // We want the overflow stack to be permanent
    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
    _overflow_stack_depth = NULL;
  }

  // With a single GC thread (or a zero drain target) there is nobody to
  // leave work for, so always drain completely.
  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size to be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}
   1.213 +
   1.214 +void PSPromotionManager::reset() {
   1.215 +  assert(claimed_stack_empty(), "reset of non-empty claimed stack");
   1.216 +  assert(overflow_stack_empty(), "reset of non-empty overflow stack");
   1.217 +
   1.218 +  // We need to get an assert in here to make sure the labs are always flushed.
   1.219 +
   1.220 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   1.221 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   1.222 +
   1.223 +  // Do not prefill the LAB's, save heap wastage!
   1.224 +  HeapWord* lab_base = young_space()->top();
   1.225 +  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
   1.226 +  _young_gen_is_full = false;
   1.227 +
   1.228 +  lab_base = old_gen()->object_space()->top();
   1.229 +  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
   1.230 +  _old_gen_is_full = false;
   1.231 +
   1.232 +  _prefetch_queue.clear();
   1.233 +
   1.234 +#if PS_PM_STATS
   1.235 +  _total_pushes = 0;
   1.236 +  _masked_pushes = 0;
   1.237 +  _overflow_pushes = 0;
   1.238 +  _max_overflow_length = 0;
   1.239 +  _arrays_chunked = 0;
   1.240 +  _array_chunks_processed = 0;
   1.241 +  _total_steals = 0;
   1.242 +  _masked_steals = 0;
   1.243 +#endif // PS_PM_STATS
   1.244 +}
   1.245 +
// Drain this manager's depth-first stacks. The (private) overflow stack
// is emptied first so new work lands on the claimed stack where other
// threads can steal it; the claimed stack is then popped either
// completely (totally_drain) or only down to _target_stack_size,
// deliberately leaving entries behind for stealers.
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  assert(depth_first(), "invariant");
  assert(overflow_stack_depth() != NULL, "invariant");
  // A manager configured to always drain ignores the caller's request.
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    oop* p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!overflow_stack_depth()->is_empty()) {
      p = overflow_stack_depth()->pop();
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (claimed_stack_depth()->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (claimed_stack_depth()->size() > _target_stack_size &&
             claimed_stack_depth()->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
    // Processing a popped location can push new work onto either stack,
    // so keep looping until both are at their target levels.
  } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
           (overflow_stack_depth()->length() > 0) );

  assert(!totally_drain || claimed_stack_empty(), "Sanity");
  assert(totally_drain ||
         claimed_stack_depth()->size() <= _target_stack_size,
         "Sanity");
  assert(overflow_stack_empty(), "Sanity");
}
   1.288 +
// Breadth-first counterpart of drain_stacks_depth(): the stacks hold
// oops (objects to scan) rather than oop* locations, and each popped
// object is scanned via copy_contents(). Overflow is drained first so
// stealable work stays on the claimed stack; the claimed stack is then
// popped completely or down to _target_stack_size.
void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
  assert(!depth_first(), "invariant");
  assert(overflow_stack_breadth() != NULL, "invariant");
  // A manager configured to always drain ignores the caller's request.
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {
    oop obj;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!overflow_stack_breadth()->is_empty()) {
      obj = overflow_stack_breadth()->pop();
      obj->copy_contents(this);
    }

    if (totally_drain) {
      // obj is a reference!!!
      while (claimed_stack_breadth()->pop_local(obj)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
        obj->copy_contents(this);
      }
    } else {
      // obj is a reference!!!
      while (claimed_stack_breadth()->size() > _target_stack_size &&
             claimed_stack_breadth()->pop_local(obj)) {
        // It would be nice to assert about the type of objects we might
        // pop, but they can come from anywhere, unfortunately.
        obj->copy_contents(this);
      }
    }

    // If we could not find any other work, flush the prefetch queue
    if (claimed_stack_breadth()->size() == 0 &&
        (overflow_stack_breadth()->length() == 0)) {
      flush_prefetch_queue();
    }
    // Scanning (or flushing the prefetch queue) may have produced more
    // work; loop until both stacks are at their target levels.
  } while((totally_drain && claimed_stack_breadth()->size() > 0) ||
          (overflow_stack_breadth()->length() > 0));

  assert(!totally_drain || claimed_stack_empty(), "Sanity");
  assert(totally_drain ||
         claimed_stack_breadth()->size() <= _target_stack_size,
         "Sanity");
  assert(overflow_stack_empty(), "Sanity");
}
   1.343 +
   1.344 +void PSPromotionManager::flush_labs() {
   1.345 +  assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
   1.346 +  assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");
   1.347 +
   1.348 +  // If either promotion lab fills up, we can flush the
   1.349 +  // lab but not refill it, so check first.
   1.350 +  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
   1.351 +  if (!_young_lab.is_flushed())
   1.352 +    _young_lab.flush();
   1.353 +
   1.354 +  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
   1.355 +  if (!_old_lab.is_flushed())
   1.356 +    _old_lab.flush();
   1.357 +
   1.358 +  // Let PSScavenge know if we overflowed
   1.359 +  if (_young_gen_is_full) {
   1.360 +    PSScavenge::set_survivor_overflow(true);
   1.361 +  }
   1.362 +}
   1.363 +
   1.364 +//
   1.365 +// This method is pretty bulky. It would be nice to split it up
   1.366 +// into smaller submethods, but we need to be careful not to hurt
   1.367 +// performance.
   1.368 +//
   1.369 +
// Copy object o into a survivor space (to-space if young enough, else
// the old gen), racing other GC threads via a CAS on the mark word.
// The winner's copy becomes the forwardee and is pushed for scanning
// (depth- or breadth-first per 'depth_first'); losers undo their copy
// and return the winner's forwardee. Returns the forwarded object.
oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
  assert(PSScavenge::should_scavenge(o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the objects age, MT safe.
    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = (oop) _young_lab.allocate(new_obj_size);
      if (new_obj == NULL && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != NULL) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = (oop) _young_lab.allocate(new_obj_size);
          } else {
            // To-space is exhausted; remember so we stop trying.
            _young_gen_is_full = true;
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      // Test hook: force a promotion failure in debug builds.
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if(lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      if (depth_first) {
        // Do the size comparison first with new_obj_size, which we
        // already have. Hopefully, only a few objects are larger than
        // _min_array_size_for_chunking, and most of them will be arrays.
        // So, the is->objArray() test would be very infrequent.
        if (new_obj_size > _min_array_size_for_chunking &&
            new_obj->is_objArray() &&
            PSChunkLargeArrays) {
          // we'll chunk it: push a tagged pointer to the OLD copy, whose
          // length field tracks chunking progress (see process_array_chunk).
#if PS_PM_STATS
          ++_arrays_chunked;
#endif // PS_PM_STATS
          oop* const masked_o = mask_chunked_array_oop(o);
          push_depth(masked_o);
#if PS_PM_STATS
          ++_masked_pushes;
#endif // PS_PM_STATS
        } else {
          // we'll just push its contents
          new_obj->push_contents(this);
        }
      } else {
        push_breadth(new_obj);
      }
    }  else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Unallocate the space used. NOTE! We may have directly allocated
      // the object. If so, we cannot deallocate it, so we have to test!
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object(new_obj)) {
          // The promotion lab failed to unallocate the object.
          // We need to overwrite the object with a filler that
          // contains no interior pointers.
          MemRegion mr((HeapWord*)new_obj, new_obj_size);
          // Clean this up and move to oopFactory (see bug 4718422)
          SharedHeap::fill_region_with_object(mr);
        }
      } else {
        if (!_young_lab.unallocate_object(new_obj)) {
          // The promotion lab failed to unallocate the object.
          // We need to overwrite the object with a filler that
          // contains no interior pointers.
          MemRegion mr((HeapWord*)new_obj, new_obj_size);
          // Clean this up and move to oopFactory (see bug 4718422)
          SharedHeap::fill_region_with_object(mr);
        }
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

// NOTE(review): guarded by DEBUG rather than the usual ASSERT macro —
// confirm DEBUG is actually defined in debug builds, or this trace is dead.
#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x -> 0x%x (%d)}",
       PSScavenge::should_scavenge(new_obj) ? "copying" : "tenuring",
       new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());

  }
#endif

  return new_obj;
}
   1.545 +
// Scan one chunk of a large, already-forwarded object array. 'old' is
// the from-space copy; its length field is (ab)used as a cursor: each
// pass scans the tail [start, end) of the NEW copy and shrinks old's
// length, re-pushing a masked pointer until the final chunk restores
// the true length from the forwarded copy.
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

#if PS_PM_STATS
  ++_array_chunks_processed;
#endif // PS_PM_STATS

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
#if PS_PM_STATS
    ++_masked_pushes;
#endif // PS_PM_STATS
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  assert(start < end, "invariant");
  // Scan the elements of this chunk in the new copy, claiming/forwarding
  // any that still need to be scavenged.
  oop* const base      = objArrayOop(obj)->base();
  oop* p               = base + start;
  oop* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(*p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}
   1.586 +
// Handle a failed promotion of obj (old gen full, or a forced test
// failure). The object is self-forwarded with a CAS: the winner pushes
// it for in-place scanning and lets PSScavenge preserve the mark;
// losers simply return the existing forwardee. Returns the forwarded
// (possibly self-forwarded) object.
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started.  If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    if (depth_first()) {
      obj->push_contents(this);
    } else {
      // Don't bother incrementing the age, just push
      // onto the claimed_stack.
      push_breadth(obj);
    }

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  }  else {
    // We lost, someone else "owns" this object
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

// NOTE(review): guarded by DEBUG rather than the usual ASSERT macro —
// confirm DEBUG is actually defined in debug builds, or this trace is dead.
#ifdef DEBUG
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                           "promotion-failure",
                           obj->blueprint()->internal_name(),
                           obj, obj->size());

  }
#endif

  return obj;
}

mercurial