src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp

author       jcoomes
date         Fri, 16 Jul 2010 21:33:21 -0700
changeset    2020:a93a9eda13f7
parent       1993:b2a00dd3117c
child        2061:9d7a8ab3736b
permissions  -rw-r--r--

6962947: shared TaskQueue statistics
Reviewed-by: tonyp, ysr

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psPromotionManager.cpp.incl"

PSPromotionManager**         PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*         PSPromotionManager::_stack_array_depth = NULL;
OopTaskQueueSet*             PSPromotionManager::_stack_array_breadth = NULL;
PSOldGen*                    PSPromotionManager::_old_gen = NULL;
MutableSpace*                PSPromotionManager::_young_space = NULL;
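
// One manager per GC worker thread, plus one extra slot for the VM thread
// (see initialize() below). Depending on UseDepthFirstScavengeOrder, either
// a depth-first queue set (of oop* locations) or a breadth-first queue set
// (of oops) is created; only the worker queues are registered for stealing.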
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  if (UseDepthFirstScavengeOrder) {
    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
  } else {
    _stack_array_breadth = new OopTaskQueueSet(ParallelGCThreads);
    guarantee(_stack_array_breadth != NULL, "Could not initialize promotion manager");
  }

  // Create and register the PSPromotionManager(s) for the worker threads.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _manager_array[i] = new PSPromotionManager();
    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
    if (UseDepthFirstScavengeOrder) {
      stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
    } else {
      stack_array_breadth()->register_queue(i, _manager_array[i]->claimed_stack_breadth());
    }
  }

  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
  _manager_array[ParallelGCThreads] = new PSPromotionManager();
  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[ParallelGCThreads];
}

void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();
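
  // Reset every manager, including the extra one owned by the VM thread
  // (slot ParallelGCThreads), hence the +1 bound below.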
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    manager_array(i)->reset();
  }
}

void PSPromotionManager::post_scavenge() {
  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    if (UseDepthFirstScavengeOrder) {
      assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    } else {
      assert(manager->claimed_stack_breadth()->is_empty(), "should be empty");
    }
    manager->flush_labs();
  }
}

#if TASKQUEUE_STATS
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  const TaskQueueStats& stats = depth_first() ?
    _claimed_stack_depth.stats : _claimed_stack_breadth.stats;
  tty->print("%3u ", i);
  stats.print();
  tty->cr();
}

void
PSPromotionManager::print_local_stats(uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_stats() {
  const bool df = UseDepthFirstScavengeOrder;
  tty->print_cr("== GC Task Stats (%s-First), GC %3d", df ? "Depth" : "Breadth",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_taskqueue_stats(i);
  }

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) tty->print_cr(pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(i);
  }
}

void
PSPromotionManager::reset_stats() {
  TaskQueueStats& stats = depth_first() ?
    claimed_stack_depth()->stats : claimed_stack_breadth()->stats;
  stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS

PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  _depth_first = UseDepthFirstScavengeOrder;

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  if (depth_first()) {
    claimed_stack_depth()->initialize();
    queue_size = claimed_stack_depth()->max_elems();
  } else {
    claimed_stack_breadth()->initialize();
    queue_size = claimed_stack_breadth()->max_elems();
  }

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }
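
  // A minimal worked example (assuming the default GCDrainStackTargetSize
  // of 64 and a queue capacity well above 256 entries): the target becomes
  // min(64, queue_size / 4) = 64, so a partial drain stops once the local
  // queue holds at most 64 entries, leaving work for other threads to steal.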

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}

void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; that saves heap wastage!
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _prefetch_queue.clear();

  TASKQUEUE_STATS_ONLY(reset_stats());
}

void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  assert(depth_first(), "invariant");
  assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;
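
    // Note: a popped entry is either a plain oop*/narrowOop* location or a
    // masked chunked-array pointer (see copy_to_survivor_space() and
    // process_array_chunk() below); process_popped_location_depth()
    // distinguishes the two cases.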

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
  assert(!depth_first(), "invariant");
  assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant");
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  OverflowTaskQueue<oop>* const tq = claimed_stack_breadth();
  do {
    oop obj;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(obj)) {
      obj->copy_contents(this);
    }

    if (totally_drain) {
      while (tq->pop_local(obj)) {
        obj->copy_contents(this);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(obj)) {
        obj->copy_contents(this);
      }
    }

    // If we could not find any other work, flush the prefetch queue
    if (tq->is_empty()) {
      flush_prefetch_queue();
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
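// In outline, the copying protocol below is:
//   1. Read the mark word once; if it already holds a forwarding pointer,
//      simply return the forwardee.
//   2. Otherwise, allocate space for the copy: in the young PLAB or
//      to-space while the object's age is below the tenuring threshold,
//      falling back to the old PLAB or the old gen.
//   3. Copy the object, then try to CAS a forwarding pointer into the
//      original's mark word.
//   4. The CAS winner owns the copy and pushes its contents (or a chunked
//      array task) for later scanning; a loser unallocates or fills its
//      now-dead copy and returns the winner's forwardee.
//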
oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = (oop) _young_lab.allocate(new_obj_size);
      if (new_obj == NULL && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
          if (lab_base != NULL) {
            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
            // Try the young lab allocation again.
            new_obj = (oop) _young_lab.allocate(new_obj_size);
          } else {
            _young_gen_is_full = true;
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion-failure test and handling code.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      if (depth_first) {
        // Do the size comparison first with new_obj_size, which we
        // already have. Hopefully, only a few objects are larger than
        // _min_array_size_for_chunking, and most of them will be arrays.
        // So, the is->objArray() test would be very infrequent.
        if (new_obj_size > _min_array_size_for_chunking &&
            new_obj->is_objArray() &&
            PSChunkLargeArrays) {
          // we'll chunk it
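          //
          // Instead of pushing a location to scan, push the old copy of the
          // array with a tag bit set in the pointer. A pop recognizes the
          // tag (see process_popped_location_depth()) and hands the entry
          // to process_array_chunk() rather than treating it as an oop*.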
          oop* const masked_o = mask_chunked_array_oop(o);
          push_depth(masked_o);
          TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
        } else {
          // we'll just push its contents
          new_obj->push_contents(this);
        }
      } else {
        push_breadth(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space.  If it was directly allocated we cannot
      // deallocate it, so we have to test.  If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object(new_obj)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object(new_obj)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifdef DEBUG
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
       PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
       new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
  }
#endif

  return new_obj;
}

template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start < end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}
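
// The chunking protocol uses the length field of the old (forwarded-from)
// copy of the array as a cursor: each pop scans the tail chunk
// [length - _array_chunk_size, length), shrinks the stored length, and
// re-pushes the masked task until the remainder is small enough to finish
// in one step, at which point the true length is restored from the new copy.
//
// A worked example (assuming ParGCArrayScanChunk at its default of 50, so
// _min_array_size_for_chunking is 75): for a 200-element array, successive
// pops scan [150,200), [100,150), and [50,100), each re-pushing a masked
// task; the final pop sees length 50 <= 75 and scans [0,50).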
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}

oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started.  If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
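  //
  // Note that the object is forwarded to itself here: the self-forwarded
  // mark is what later identifies objects that failed promotion, and the
  // saved mark word is restored by PSScavenge when the failure is unwound.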
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    if (depth_first()) {
      obj->push_contents(this);
    } else {
      // Don't bother incrementing the age, just push
      // onto the claimed stack.
      push_breadth(obj);
    }

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifdef DEBUG
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (" SIZE_FORMAT ")}",
                           "promotion-failure",
                           obj->blueprint()->internal_name(),
                           obj, obj->size());
  }
#endif

  return obj;
}
