src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

changeset: 5784:190899198332
parent:    5516:330dfb0476f4
child:     6131:86e6d691f2e1
author:    hseigel
date:      Thu, 26 Sep 2013 10:25:02 -0400

7195622: CheckUnhandledOops has limited usefulness now
Summary: Enable CHECK_UNHANDLED_OOPS in fastdebug builds across all supported platforms.
Reviewed-by: coleenp, hseigel, dholmes, stefank, twisti, ihse, rdurbin
Contributed-by: lois.foltan@oracle.com

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/workgroup.hpp"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0), _term_time(0.0)
{
  #if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
  #endif // TASKQUEUE_STATS

  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start     = arrayOop(old)->length();
  int end       = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // process our set of indices (include header in first chunk)
  // should make sure end is even (aligned to HeapWord in case of compressed oops)
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}
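
// Example (assuming the default ParGCArrayScanChunk of 50): for a 620-element
// array, the first call scans elements [0, 50), leaves the old copy's length
// field at 50 as the scan cursor, and re-pushes the old copy; later calls
// advance the cursor 50 elements at a time until the remainder (70 once the
// cursor reaches 550) is at most 2 * 50, at which point the tail is scanned in
// one pass and the true length is restored on the old copy.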
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // object is in to_space
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // object is in old generation
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(Universe::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(Universe::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}
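
// Example (assuming the default ParGCDesiredObjsFromOverflowList of 20): with
// 1000 free slots in the work queue and 5 entries on the overflow stack,
// num_take_elems = MIN3(250, 20, 5) = 5. The space_available / 4 term appears
// intended to leave queue headroom for objects pushed while scanning the
// transferred entries.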
void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

  // Otherwise, if the object is small enough, try to reallocate the
  // buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}
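
// Example (assuming the default ParallelGCBufferWastePct of 10): with a
// 4096-word PLAB, the test admits requests of up to 409 words to the
// retire-and-refill path (word_sz * 100 < 10 * 4096); anything larger bypasses
// the PLAB and is allocated directly in to-space with par_allocate().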
void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}
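
// Note: when the allocation was made directly in to-space rather than in the
// current PLAB, it cannot simply be retracted, so the dead space is overwritten
// with a filler object to keep to-space parseable.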
void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                        _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

  #if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
  #endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
 public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  Stack<oop, mtGC>* overflow_stacks,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           overflow_stacks, desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats()
{
  for (int i = 0; i < length(); ++i) {
    thread_state(i).reset_stats();
  }
}

void
ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- "
                   "-------termination-------");
  st->print_raw_cr("thr     ms        ms       %   "
                   "    ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ "
                   "--------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
{
  print_termination_stats_hdr(st);

  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
{
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < length(); ++i) {
    const ParScanThreadState & pss = thread_state(i);
    const TaskQueueStats & stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush()
{
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             true /* end_of_gc */,
                             false /* retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);
  }

  if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loathe
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CFLS_LAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work, attempt to steal from other threads.

    // attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      //   if successful, goto Start.
      continue;

      // try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}
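
// Note: each worker discovers work in three tiers in the loop above: its own
// queue (trim_queues), stealing from other workers' queues, and the shared
// overflow list; only when all three come up empty does it offer termination.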
ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
  _state_set->reset(active_workers, _gen->promotion_failed());
  // Should the heap be passed in?  There's only 1 for now so
  // grab it instead.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_n_termination(active_workers);
}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                      gch->rem_set()->klass_rem_set());

  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                false, // no scope; this is parallel code
                                true,  // is scavenging
                                SharedHeap::ScanningOption(so),
                                &par_scan_state.to_space_root_closure(),
                                true,   // walk *all* scavengable nmethods
                                &par_scan_state.older_gen_closure(),
                                &klass_scan_closure);
  par_scan_state.end_strong_roots();

  // "evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueue *q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i1, q);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {

    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
    // with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}
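
// Note: the is_in_reserved(p) check above guards the card-table update; p may
// be a root location outside the heap (a stack slot or handle), and only
// heap-resident reference fields have a corresponding card to dirty.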
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);
  virtual void set_for_termination(int active_workers) {
    _state_set.terminator()->reset_for_reuse(active_workers);
  }
private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(uint worker_id)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id)
  {
    _task.work(worker_id);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _generation.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _generation.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}
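
// Note: this is a fixed-point loop; each pass may copy more objects (advancing
// the save-marks pointers), so iteration continues until a pass completes with
// no allocations since the previous save_marks.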
// A Generation that does parallel young-gen collection.

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single threaded code may have reported promotion failure to the global state
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start(os::elapsed_counter());

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                   workers->active_workers(),
                                   Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer);
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (PrintGCDetails && ParallelGCVerbose) {
    TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
    TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end(os::elapsed_counter());

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}
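
// Note: sum is static and updated in the loop above, presumably so that the
// compiler cannot eliminate the delay loop as dead code.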

static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
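// The value 0x4 presumably works as a sentinel because no genuine oop can ever
// equal it: such a low address is never part of the heap, so the interim value
// is always distinguishable from a real forwarding pointer.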

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, and waits, if
// necessary for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // We should really have separate per-worker stacks, rather
    // than use locking of a common pair of stacks.
    MutexLocker ml(ParGCRareEvent_lock);
    preserve_mark(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old)
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote
    // try allocating obj tenured

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                       old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
       is_in_reserved(new_obj) ? "copying" : "tenuring",
       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  }
#endif

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee()
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}
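
// Note: the variant above never needs to undo an old-gen allocation: it
// CAS-claims the forwarding pointer (ClaimedForwardPtr) before promoting, so a
// losing thread backs off before copying into the old gen, and only cheap
// to-space PLAB allocations are ever retracted. The variant below copies first
// and undoes the copy if it loses the installation race; the claim-first form
// is selected under CMS via set_avoid_promotion_undo() in collect().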
  1279 // Multiple GC threads may try to promote the same object.  If two
  1280 // or more GC threads copy the object, only one wins the race to install
  1281 // the forwarding pointer.  The other threads have to undo their copy.
  1283 oop ParNewGeneration::copy_to_survivor_space_with_undo(
  1284         ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  1286   // In the sequential version, this assert also says that the object is
  1287   // not forwarded.  That might not be the case here.  It is the case that
  1288   // the caller observed it to be not forwarded at some time in the past.
  1289   assert(is_in_reserved(old), "shouldn't be scavenging this oop");
  1291   // The sequential code read "old->age()" below.  That doesn't work here,
  1292   // since the age is in the mark word, and that might be overwritten with
  1293   // a forwarding pointer by a parallel thread.  So we must save the mark
  1294   // word here, install it in a local oopDesc, and then analyze it.
  1295   oopDesc dummyOld;
  1296   dummyOld.set_mark(m);
  1297   assert(!dummyOld.is_forwarded(),
  1298          "should not be called with forwarding pointer mark word.");
  1300   bool failed_to_promote = false;
  1301   oop new_obj = NULL;
  1302   oop forward_ptr;
  1304   // Try allocating obj in to-space (unless too old)
  1305   if (dummyOld.age() < tenuring_threshold()) {
  1306     new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
  1307     if (new_obj == NULL) {
  1308       set_survivor_overflow(true);
  1309     }
  1310   }
  1312   if (new_obj == NULL) {
  1313     // Either to-space is full or we decided to promote
  1314     // try allocating obj tenured
  1315     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
  1316                                        old, m, sz);
  1318     if (new_obj == NULL) {
  1319       // promotion failed, forward to self
  1320       forward_ptr = old->forward_to_atomic(old);
  1321       new_obj = old;
  1323       if (forward_ptr != NULL) {
  1324         return forward_ptr;   // someone else succeeded
  1325       }
  1327       _promotion_failed = true;
  1328       failed_to_promote = true;
  1330       preserve_mark_if_necessary(old, m);
  1331       par_scan_state->register_promotion_failure(sz);
  1332     }
  1333   } else {
  1334     // Is in to-space; do copying ourselves.
  1335     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
  1336     // Restore the mark word copied above.
  1337     new_obj->set_mark(m);
  1338     // Increment age if new_obj still in new generation
  1339     new_obj->incr_age();
  1340     par_scan_state->age_table()->add(new_obj, sz);
  1341   }
  1342   assert(new_obj != NULL, "just checking");
  1344 #ifndef PRODUCT
  1345   // This code must come after the CAS test, or it will print incorrect
  1346   // information.
  1347   if (TraceScavenge) {
  1348     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
  1349        is_in_reserved(new_obj) ? "copying" : "tenuring",
  1350        new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
  1351   }
  1352 #endif
  1354   // Now attempt to install the forwarding pointer (atomically).
  1355   // We have to copy the mark word before overwriting with forwarding
  1356   // ptr, so we can restore it below in the copy.
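         // (On promotion failure, old was already self-forwarded by the
         //  forward_to_atomic(old) call above, so the CAS is skipped here and
         //  forward_ptr still holds that earlier result.)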
  1357   if (!failed_to_promote) {
  1358     forward_ptr = old->forward_to_atomic(new_obj);
  1359   }
  1361   if (forward_ptr == NULL) {
  1362     oop obj_to_push = new_obj;
  1363     if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
  1364       // Length field used as index of next element to be scanned.
  1365       // Real length can be obtained from real_forwardee()
  1366       arrayOop(old)->set_length(0);
  1367       obj_to_push = old;
  1368       assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
  1369              "push forwarded object");
  1370     }
  1371     // Push it on one of the queues of to-be-scanned objects.
  1372     bool simulate_overflow = false;
  1373     NOT_PRODUCT(
  1374       if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
  1375         // simulate a stack overflow
  1376         simulate_overflow = true;
  1377       }
  1378     )
  1379     if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
  1380       // Add stats for overflow pushes.
  1381       push_on_overflow_list(old, par_scan_state);
  1382       TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
  1383     }
  1385     return new_obj;
  1386   }
  1388   // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  1389   // allocate it?
  1390   if (is_in_reserved(new_obj)) {
  1391     // Must be in to_space.
  1392     assert(to()->is_in_reserved(new_obj), "Checking");
  1393     par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  1394   } else {
  1395     assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
  1396     _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
  1397                                       (HeapWord*)new_obj, sz);
  1398   }
  1400   return forward_ptr;
  1401 }
  1403 #ifndef PRODUCT
  1404 // It's OK to call this multi-threaded;  the worst thing
  1405 // that can happen is that we'll get a bunch of closely
  1406 // spaced simulated overflows, but that's OK, in fact
  1407 // probably good as it would exercise the overflow code
  1408 // under contention.
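       // (Multiple threads decrement _overflow_counter without synchronization,
       //  so the interval is only approximate; that imprecision is harmless for
       //  a test knob and, as noted above, adds useful contention.)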
  1409 bool ParNewGeneration::should_simulate_overflow() {
  1410   if (_overflow_counter-- <= 0) { // just being defensive
  1411     _overflow_counter = ParGCWorkQueueOverflowInterval;
  1412     return true;
  1413   } else {
  1414     return false;
  1415   }
  1416 }
  1417 #endif
  1419 // In case we are using compressed oops, we need to be careful.
  1420 // If the object being pushed is an object array, then its length
  1421 // field keeps track of the "grey boundary" at which the next
  1422 // incremental scan will be done (see ParGCArrayScanChunk).
  1423 // When using compressed oops, this length field is kept in the
  1424 // lower 32 bits of the erstwhile klass word and cannot be used
  1425 // for the overflow chaining pointer (OCP below). As such the OCP
  1426 // would itself need to be compressed into the top 32-bits in this
  1427 // case. Unfortunately, see below, in the event that we have a
  1428 // promotion failure, the node to be pushed on the list can be
  1429 // outside of the Java heap, so the heap-based pointer compression
  1430 // would not work (we would have potential aliasing between C-heap
  1431 // and Java-heap pointers). For this reason, when using compressed
  1432 // oops, we simply use a worker-thread-local, non-shared overflow
  1433 // list in the form of a growable array, with a slightly different
  1434 // overflow stack draining strategy. If/when we start using fat
  1435 // stacks here, we can go back to using (fat) pointer chains
  1436 // (although some performance comparisons would be useful since
  1437 // single global lists have their own performance disadvantages
  1438 // as we were made painfully aware not long ago, see 6786503).
  1439 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
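       // BUSY is a sentinel distinguishable from both NULL (list empty) and any
       // real oop (the value 0x1aff1aff is chosen so it cannot collide with a
       // valid heap address): a thread xchg's BUSY into _overflow_list to mark
       // the list claimed while it trims a prefix for itself.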
  1440 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  1441   assert(is_in_reserved(from_space_obj), "Should be from this generation");
  1442   if (ParGCUseLocalOverflow) {
  1443     // In the case of compressed oops, we use a private, not-shared
  1444     // overflow stack.
  1445     par_scan_state->push_on_overflow_stack(from_space_obj);
  1446   } else {
  1447     assert(!UseCompressedOops, "Error");
  1448     // if the object has been forwarded to itself, then we cannot
  1449     // use the klass pointer for the linked list.  Instead we have
  1450     // to allocate an oopDesc in the C-Heap and use that for the linked list.
  1451     // XXX This is horribly inefficient when a promotion failure occurs
  1452     // and should be fixed. XXX FIX ME !!!
  1453 #ifndef PRODUCT
  1454     Atomic::inc_ptr(&_num_par_pushes);
  1455     assert(_num_par_pushes > 0, "Tautology");
  1456 #endif
  1457     if (from_space_obj->forwardee() == from_space_obj) {
  1458       oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
  1459       listhead->forward_to(from_space_obj);
  1460       from_space_obj = listhead;
  1461     }
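           // (The C-heap proxy above exists because a self-forwarded object's
           //  klass word cannot double as the list link: on the drain side the
           //  klass word is restored from the forwardee, which for a self-
           //  forwarded object is the clobbered header itself. The proxy's
           //  header is expendable, and its forwardee locates the real object.)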
  1462     oop observed_overflow_list = _overflow_list;
  1463     oop cur_overflow_list;
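           // Classic lock-free prepend: link the node to the observed head (or
           // to NULL when the head reads BUSY), cmpxchg the node in, and retry
           // until the head observed beforehand is the one actually replaced.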
  1464     do {
  1465       cur_overflow_list = observed_overflow_list;
  1466       if (cur_overflow_list != BUSY) {
  1467         from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
  1468       } else {
  1469         from_space_obj->set_klass_to_list_ptr(NULL);
  1470       }
  1471       observed_overflow_list =
  1472         (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
  1473     } while (cur_overflow_list != observed_overflow_list);
  1474   }
  1475 }
  1477 bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  1478   bool res;
  1480   if (ParGCUseLocalOverflow) {
  1481     res = par_scan_state->take_from_overflow_stack();
  1482   } else {
  1483     assert(!UseCompressedOops, "Error");
  1484     res = take_from_overflow_list_work(par_scan_state);
  1485   }
  1486   return res;
  1487 }
  1490 // *NOTE*: The overflow list manipulation code here and
  1491 // in CMSCollector:: are very similar in shape,
  1492 // except that in the CMS case we thread the objects
  1493 // directly into the list via their mark word, and do
  1494 // not need to deal with special cases below related
  1495 // to chunking of object arrays and promotion failure
  1496 // handling.
  1497 // CR 6797058 has been filed to attempt consolidation of
  1498 // the common code.
  1499 // Because of the common code, if you make any changes in
  1500 // the code below, please check the CMS version to see if
  1501 // similar changes might be needed.
  1502 // See CMSCollector::par_take_from_overflow_list() for
  1503 // more extensive documentation comments.
  1504 bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  1505   ObjToScanQueue* work_q = par_scan_state->work_queue();
  1506   // How many to take?
  1507   size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
  1508                                  (size_t)ParGCDesiredObjsFromOverflowList);
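         // (That is: at most a quarter of the remaining queue capacity, capped
         //  by ParGCDesiredObjsFromOverflowList.)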
  1510   assert(!UseCompressedOops, "Error");
  1511   assert(par_scan_state->overflow_stack() == NULL, "Error");
  1512   if (_overflow_list == NULL) return false;
  1514   // Otherwise, there was something there; try claiming the list.
  1515   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  1516   // Trim off a prefix of at most objsFromOverflow items
  1517   Thread* tid = Thread::current();
  1518   size_t spin_count = (size_t)ParallelGCThreads;
  1519   size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
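         // (Back-off heuristic: sleep roughly in proportion to the number of
         //  objects we hope to take, with a 1 ms floor.)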
  1520   for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
  1521     // someone grabbed it before we did ...
  1522     // ... we spin for a short while...
  1523     os::sleep(tid, sleep_time_millis, false);
  1524     if (_overflow_list == NULL) {
  1525       // nothing left to take
  1526       return false;
  1527     } else if (_overflow_list != BUSY) {
  1528      // try and grab the prefix
  1529      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
  1530     }
  1531   }
  1532   if (prefix == NULL || prefix == BUSY) {
  1533      // Nothing to take or waited long enough
  1534      if (prefix == NULL) {
  1535        // Write back the NULL in case we overwrote it with BUSY above
  1536        // and it is still the same value.
  1537        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
  1538      }
  1539      return false;
  1540   }
  1541   assert(prefix != NULL && prefix != BUSY, "Error");
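         // Claim succeeded: walk at most objsFromOverflow nodes to find where
         // the kept prefix ends and the suffix to give back begins.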
  1542   size_t i = 1;
  1543   oop cur = prefix;
  1544   while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
  1545     i++; cur = cur->list_ptr_from_klass();
  1546   }
  1548   // Reattach remaining (suffix) to overflow list
  1549   if (cur->klass_or_null() == NULL) {
  1550     // Write back the NULL in lieu of the BUSY we wrote
  1551     // above, if it is still the same value.
  1552     if (_overflow_list == BUSY) {
  1553       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
  1554     }
  1555   } else {
  1556     assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error");
  1557     oop suffix = cur->list_ptr_from_klass();       // suffix will be put back on global list
  1558     cur->set_klass_to_list_ptr(NULL);     // break off suffix
  1559     // It's possible that the list is still in the empty(busy) state
  1560     // we left it in a short while ago; in that case we may be
  1561     // able to place back the suffix.
  1562     oop observed_overflow_list = _overflow_list;
  1563     oop cur_overflow_list = observed_overflow_list;
  1564     bool attached = false;
  1565     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
  1566       observed_overflow_list =
  1567         (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
  1568       if (cur_overflow_list == observed_overflow_list) {
  1569         attached = true;
  1570         break;
  1571       } else cur_overflow_list = observed_overflow_list;
  1572     }
  1573     if (!attached) {
  1574       // Too bad, someone else got in in between; we'll need to do a splice.
  1575       // Find the last item of suffix list
  1576       oop last = suffix;
  1577       while (last->klass_or_null() != NULL) {
  1578         last = last->list_ptr_from_klass();
  1579       }
  1580       // Atomically prepend suffix to current overflow list
  1581       observed_overflow_list = _overflow_list;
  1582       do {
  1583         cur_overflow_list = observed_overflow_list;
  1584         if (cur_overflow_list != BUSY) {
  1585           // Do the splice ...
  1586           last->set_klass_to_list_ptr(cur_overflow_list);
  1587         } else { // cur_overflow_list == BUSY
  1588           last->set_klass_to_list_ptr(NULL);
  1590         observed_overflow_list =
  1591           (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
  1592       } while (cur_overflow_list != observed_overflow_list);
  1593     }
  1594   }
  1596   // Push objects on prefix list onto this thread's work queue
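         // (Each node's forwardee() is the object to scan and its klass word is
         //  the next link; the klass word is restored from the forwarded copy as
         //  the node is unlinked below.)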
  1597   assert(prefix != NULL && prefix != BUSY, "program logic");
  1598   cur = prefix;
  1599   ssize_t n = 0;
  1600   while (cur != NULL) {
  1601     oop obj_to_push = cur->forwardee();
  1602     oop next        = cur->list_ptr_from_klass();
  1603     cur->set_klass(obj_to_push->klass());
  1604     // This may be an array object that is self-forwarded. In that case, the list pointer
  1605     // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
  1606     if (!is_in_reserved(cur)) {
  1607       // This can become a scaling bottleneck when there is work queue overflow coincident
  1608       // with promotion failure.
  1609       oopDesc* f = cur;
  1610       FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
  1611     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
  1612       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
  1613       obj_to_push = cur;
  1614     }
  1615     bool ok = work_q->push(obj_to_push);
  1616     assert(ok, "Should have succeeded");
  1617     cur = next;
  1618     n++;
  1619   }
  1620   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
  1621 #ifndef PRODUCT
  1622   assert(_num_par_pushes >= n, "Too many pops?");
  1623   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
  1624 #endif
  1625   return true;
  1626 }
  1627 #undef BUSY
  1629 void ParNewGeneration::ref_processor_init() {
  1630   if (_ref_processor == NULL) {
  1631     // Allocate and initialize a reference processor
  1632     _ref_processor =
  1633       new ReferenceProcessor(_reserved,                  // span
  1634                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
  1635                              (int) ParallelGCThreads,    // mt processing degree
  1636                              refs_discovery_is_mt(),     // mt discovery
  1637                              (int) ParallelGCThreads,    // mt discovery degree
  1638                              refs_discovery_is_atomic(), // atomic_discovery
  1639                              NULL,                       // is_alive_non_header
  1640                              false);                     // write barrier for next field updates
  1641   }
  1642 }
  1644 const char* ParNewGeneration::name() const {
  1645   return "par new generation";
  1646 }
